/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

#define NBD_MAGIC 0x68797548

#ifdef NDEBUG
#define dprintk(flags, fmt...)
#else /* NDEBUG */
#define dprintk(flags, fmt...) do { \
        if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
} while (0)
#define DBG_IOCTL       0x0004
#define DBG_INIT        0x0010
#define DBG_EXIT        0x0020
#define DBG_BLKDEV      0x0100
#define DBG_RX          0x0200
#define DBG_TX          0x0400
static unsigned int debugflags;
#endif /* NDEBUG */

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);

#ifndef NDEBUG
static const char *ioctl_cmd_to_ascii(int cmd)
{
        switch (cmd) {
        case NBD_SET_SOCK: return "set-sock";
        case NBD_SET_BLKSIZE: return "set-blksize";
        case NBD_SET_SIZE: return "set-size";
        case NBD_SET_TIMEOUT: return "set-timeout";
        case NBD_SET_FLAGS: return "set-flags";
        case NBD_DO_IT: return "do-it";
        case NBD_CLEAR_SOCK: return "clear-sock";
        case NBD_CLEAR_QUE: return "clear-que";
        case NBD_PRINT_DEBUG: return "print-debug";
        case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
        case NBD_DISCONNECT: return "disconnect";
        case BLKROSET: return "set-read-only";
        case BLKFLSBUF: return "flush-buffer-cache";
        }
        return "unknown";
}

static const char *nbdcmd_to_ascii(int cmd)
{
        switch (cmd) {
        case NBD_CMD_READ: return "read";
        case NBD_CMD_WRITE: return "write";
        case NBD_CMD_DISC: return "disconnect";
        case NBD_CMD_FLUSH: return "flush";
        case NBD_CMD_TRIM: return "trim/discard";
        }
        return "invalid";
}
#endif /* NDEBUG */

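/*
 * Complete a request back to the block layer: translate any accumulated
 * req->errors into -EIO and end all segments under the queue lock.
 */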
static void nbd_end_request(struct request *req)
{
        int error = req->errors ? -EIO : 0;
        struct request_queue *q = req->q;
        unsigned long flags;

        dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
                req, error ? "failed" : "done");

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_end_request_all(req, error);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

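/*
 * Tear down nbd->sock so that both the sender and the receiver unblock
 * with an error; optionally takes tx_lock for callers that don't hold it.
 */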
static void sock_shutdown(struct nbd_device *nbd, int lock)
{
        /* Forcibly shutdown the socket causing all listeners
         * to error
         *
         * FIXME: This code is duplicated from sys_shutdown, but
         * there should be a more generic interface rather than
         * calling socket ops directly here */
        if (lock)
                mutex_lock(&nbd->tx_lock);
        if (nbd->sock) {
                dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
                kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
                nbd->sock = NULL;
        }
        if (lock)
                mutex_unlock(&nbd->tx_lock);
}

static void nbd_xmit_timeout(unsigned long arg)
{
        struct task_struct *task = (struct task_struct *)arg;

        printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
                task->comm, task->pid);
        force_sig(SIGKILL, task);
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
                int msg_flags)
{
        struct socket *sock = nbd->sock;
        int result;
        struct msghdr msg;
        struct kvec iov;
        sigset_t blocked, oldset;
        unsigned long pflags = current->flags;

        if (unlikely(!sock)) {
                dev_err(disk_to_dev(nbd->disk),
                        "Attempted %s on closed socket in sock_xmit\n",
                        (send ? "send" : "recv"));
                return -EINVAL;
        }

        /* Allow interception of SIGKILL only
         * Don't allow other signals to interrupt the transmission */
        siginitsetinv(&blocked, sigmask(SIGKILL));
        sigprocmask(SIG_SETMASK, &blocked, &oldset);

        current->flags |= PF_MEMALLOC;
        do {
                sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
                iov.iov_base = buf;
                iov.iov_len = size;
                msg.msg_name = NULL;
                msg.msg_namelen = 0;
                msg.msg_control = NULL;
                msg.msg_controllen = 0;
                msg.msg_flags = msg_flags | MSG_NOSIGNAL;

                if (send) {
                        struct timer_list ti;

                        if (nbd->xmit_timeout) {
                                init_timer(&ti);
                                ti.function = nbd_xmit_timeout;
                                ti.data = (unsigned long)current;
                                ti.expires = jiffies + nbd->xmit_timeout;
                                add_timer(&ti);
                        }
                        result = kernel_sendmsg(sock, &msg, &iov, 1, size);
                        if (nbd->xmit_timeout)
                                del_timer_sync(&ti);
                } else
                        result = kernel_recvmsg(sock, &msg, &iov, 1, size,
                                                msg.msg_flags);

                if (signal_pending(current)) {
                        siginfo_t info;
                        printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
                                task_pid_nr(current), current->comm,
                                dequeue_signal_lock(current, &current->blocked, &info));
                        result = -EINTR;
                        sock_shutdown(nbd, !send);
                        break;
                }

                if (result <= 0) {
                        if (result == 0)
                                result = -EPIPE; /* short read */
                        break;
                }
                size -= result;
                buf += result;
        } while (size > 0);

        sigprocmask(SIG_SETMASK, &oldset, NULL);
        tsk_restore_flags(current, pflags, PF_MEMALLOC);

        return result;
}

static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
                int flags)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
                           bvec->bv_len, flags);
        kunmap(bvec->bv_page);
        return result;
}

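/*
 * Serialize one request onto the socket: a struct nbd_request header
 * (magic, command type, a handle identifying the request, byte offset,
 * length) followed, for writes, by the data of every bio segment.
 */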
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
        int result, flags;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);

        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(nbd_cmd(req));

        if (nbd_cmd(req) == NBD_CMD_FLUSH) {
                /* Other values are reserved for FLUSH requests. */
                request.from = 0;
                request.len = 0;
        } else {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
        memcpy(request.handle, &req, sizeof(req));

        dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
                        nbd->disk->disk_name, req,
                        nbdcmd_to_ascii(nbd_cmd(req)),
                        (unsigned long long)blk_rq_pos(req) << 9,
                        blk_rq_bytes(req));
        result = sock_xmit(nbd, 1, &request, sizeof(request),
                        (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
        if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk),
                        "Send control failed (result %d)\n", result);
                goto error_out;
        }

        if (nbd_cmd(req) == NBD_CMD_WRITE) {
                struct req_iterator iter;
                struct bio_vec *bvec;
                /*
                 * we are really probing at internals to determine
                 * whether to set MSG_MORE or not...
                 */
                rq_for_each_segment(bvec, req, iter) {
                        flags = 0;
                        if (!rq_iter_last(req, iter))
                                flags = MSG_MORE;
                        dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
                                        nbd->disk->disk_name, req, bvec->bv_len);
                        result = sock_send_bvec(nbd, bvec, flags);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
                                        result);
                                goto error_out;
                        }
                }
        }
        return 0;

error_out:
        return -EIO;
}

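/*
 * Match a reply against the requests parked on nbd->queue_head, waiting
 * first for any request that is still being transmitted.
 */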
static struct request *nbd_find_request(struct nbd_device *nbd,
                                        struct request *xreq)
{
        struct request *req, *tmp;
        int err;

        err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
        if (unlikely(err))
                goto out;

        spin_lock(&nbd->queue_lock);
        list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
                if (req != xreq)
                        continue;
                list_del_init(&req->queuelist);
                spin_unlock(&nbd->queue_lock);
                return req;
        }
        spin_unlock(&nbd->queue_lock);

        err = -ENOENT;

out:
        return ERR_PTR(err);
}

static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
                        MSG_WAITALL);
        kunmap(bvec->bv_page);
        return result;
}

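/*
 * Read one struct nbd_reply from the server, look up the request it
 * acknowledges and, for reads, pull the returned data into its bios.
 */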
/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
        int result;
        struct nbd_reply reply;
        struct request *req;

        reply.magic = 0;
        result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
        if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk),
                        "Receive control failed (result %d)\n", result);
                goto harderror;
        }

        if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
                dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
                                (unsigned long)ntohl(reply.magic));
                result = -EPROTO;
                goto harderror;
        }

        req = nbd_find_request(nbd, *(struct request **)reply.handle);
        if (IS_ERR(req)) {
                result = PTR_ERR(req);
                if (result != -ENOENT)
                        goto harderror;

                dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
                        reply.handle);
                result = -EBADR;
                goto harderror;
        }

        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                req->errors++;
                return req;
        }

        dprintk(DBG_RX, "%s: request %p: got reply\n",
                        nbd->disk->disk_name, req);
        if (nbd_cmd(req) == NBD_CMD_READ) {
                struct req_iterator iter;
                struct bio_vec *bvec;

                rq_for_each_segment(bvec, req, iter) {
                        result = sock_recv_bvec(nbd, bvec);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
                                req->errors++;
                                return req;
                        }
                        dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
                                nbd->disk->disk_name, req, bvec->bv_len);
                }
        }
        return req;
harderror:
        nbd->harderror = result;
        return NULL;
}

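/*
 * Expose the pid of the userspace process running NBD_DO_IT as a
 * read-only "pid" sysfs attribute on the disk device.
 */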
static ssize_t pid_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);

        return sprintf(buf, "%ld\n",
                (long) ((struct nbd_device *)disk->private_data)->pid);
}

static struct device_attribute pid_attr = {
        .attr = { .name = "pid", .mode = S_IRUGO},
        .show = pid_show,
};

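/*
 * Receive loop run in the context of the NBD_DO_IT ioctl: completes
 * replies from the server until the socket is shut down or errors out.
 */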
static int nbd_do_it(struct nbd_device *nbd)
{
        struct request *req;
        int ret;

        BUG_ON(nbd->magic != NBD_MAGIC);

        sk_set_memalloc(nbd->sock->sk);
        nbd->pid = task_pid_nr(current);
        ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
        if (ret) {
                dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
                nbd->pid = 0;
                return ret;
        }

        while ((req = nbd_read_stat(nbd)) != NULL)
                nbd_end_request(req);

        device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
        nbd->pid = 0;
        return 0;
}

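/*
 * Fail every request still sitting on the queued and waiting lists;
 * called once nbd->sock has been cleared under tx_lock.
 */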
static void nbd_clear_que(struct nbd_device *nbd)
{
        struct request *req;

        BUG_ON(nbd->magic != NBD_MAGIC);

        /*
         * Because we have set nbd->sock to NULL under the tx_lock, all
         * modifications to the list must have completed by now. For
         * the same reason, the active_req must be NULL.
         *
         * As a consequence, we don't need to take the spin lock while
         * purging the list here.
         */
        BUG_ON(nbd->sock);
        BUG_ON(nbd->active_req);

        while (!list_empty(&nbd->queue_head)) {
                req = list_entry(nbd->queue_head.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                req->errors++;
                nbd_end_request(req);
        }

        while (!list_empty(&nbd->waiting_queue)) {
                req = list_entry(nbd->waiting_queue.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                req->errors++;
                nbd_end_request(req);
        }
}

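/*
 * Validate and tag one request (read, write, trim or flush), send it to
 * the server under tx_lock and park it on queue_head to await the reply.
 */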
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
        if (req->cmd_type != REQ_TYPE_FS)
                goto error_out;

        nbd_cmd(req) = NBD_CMD_READ;
        if (rq_data_dir(req) == WRITE) {
                if ((req->cmd_flags & REQ_DISCARD)) {
                        WARN_ON(!(nbd->flags & NBD_FLAG_SEND_TRIM));
                        nbd_cmd(req) = NBD_CMD_TRIM;
                } else
                        nbd_cmd(req) = NBD_CMD_WRITE;
                if (nbd->flags & NBD_FLAG_READ_ONLY) {
                        dev_err(disk_to_dev(nbd->disk),
                                "Write on read-only\n");
                        goto error_out;
                }
        }

        if (req->cmd_flags & REQ_FLUSH) {
                BUG_ON(unlikely(blk_rq_sectors(req)));
                nbd_cmd(req) = NBD_CMD_FLUSH;
        }

        req->errors = 0;

        mutex_lock(&nbd->tx_lock);
        if (unlikely(!nbd->sock)) {
                mutex_unlock(&nbd->tx_lock);
                dev_err(disk_to_dev(nbd->disk),
                        "Attempted send on closed socket\n");
                goto error_out;
        }

        nbd->active_req = req;

        if (nbd_send_req(nbd, req) != 0) {
                dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
                req->errors++;
                nbd_end_request(req);
        } else {
                spin_lock(&nbd->queue_lock);
                list_add_tail(&req->queuelist, &nbd->queue_head);
                spin_unlock(&nbd->queue_lock);
        }

        nbd->active_req = NULL;
        mutex_unlock(&nbd->tx_lock);
        wake_up_all(&nbd->active_wq);

        return;

error_out:
        req->errors++;
        nbd_end_request(req);
}

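/*
 * Per-device sender kthread: sleeps until do_nbd_request() queues work on
 * waiting_queue, then dequeues requests one by one and transmits them.
 */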
static int nbd_thread(void *data)
{
        struct nbd_device *nbd = data;
        struct request *req;

        set_user_nice(current, -20);
        while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
                /* wait for something to do */
                wait_event_interruptible(nbd->waiting_wq,
                                         kthread_should_stop() ||
                                         !list_empty(&nbd->waiting_queue));

                /* extract request */
                if (list_empty(&nbd->waiting_queue))
                        continue;

                spin_lock_irq(&nbd->queue_lock);
                req = list_entry(nbd->waiting_queue.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                spin_unlock_irq(&nbd->queue_lock);

                /* handle request */
                nbd_handle_req(nbd, req);
        }
        return 0;
}

/*
 * We always wait for result of write, for now. It would be nice to make it optional
 * in future
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *      { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */

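/*
 * Block-layer request function: fetch requests off the queue and hand
 * them to the sender thread via waiting_queue, dropping queue_lock while
 * doing so.
 */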
static void do_nbd_request(struct request_queue *q)
                __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct request *req;

        while ((req = blk_fetch_request(q)) != NULL) {
                struct nbd_device *nbd;

                spin_unlock_irq(q->queue_lock);

                dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
                                req->rq_disk->disk_name, req, req->cmd_type);

                nbd = req->rq_disk->private_data;

                BUG_ON(nbd->magic != NBD_MAGIC);

                if (unlikely(!nbd->sock)) {
                        dev_err(disk_to_dev(nbd->disk),
                                "Attempted send on closed socket\n");
                        req->errors++;
                        nbd_end_request(req);
                        spin_lock_irq(q->queue_lock);
                        continue;
                }

                spin_lock_irq(&nbd->queue_lock);
                list_add_tail(&req->queuelist, &nbd->waiting_queue);
                spin_unlock_irq(&nbd->queue_lock);

                wake_up(&nbd->waiting_wq);

                spin_lock_irq(q->queue_lock);
        }
}

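/*
 * Worker for nbd_ioctl(): implements each NBD_* command (socket setup,
 * sizing, timeouts, flags, disconnect, and the NBD_DO_IT receive loop).
 */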
/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NBD_DISCONNECT: {
                struct request sreq;

                dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
                if (!nbd->sock)
                        return -EINVAL;

                mutex_unlock(&nbd->tx_lock);
                fsync_bdev(bdev);
                mutex_lock(&nbd->tx_lock);
                blk_rq_init(NULL, &sreq);
                sreq.cmd_type = REQ_TYPE_SPECIAL;
                nbd_cmd(&sreq) = NBD_CMD_DISC;

                /* Check again after getting mutex back. */
                if (!nbd->sock)
                        return -EINVAL;

                nbd->disconnect = 1;

                nbd_send_req(nbd, &sreq);
                return 0;
        }

        case NBD_CLEAR_SOCK: {
                struct file *file;

                nbd->sock = NULL;
                file = nbd->file;
                nbd->file = NULL;
                nbd_clear_que(nbd);
                BUG_ON(!list_empty(&nbd->queue_head));
                BUG_ON(!list_empty(&nbd->waiting_queue));
                kill_bdev(bdev);
                if (file)
                        fput(file);
                return 0;
        }

        case NBD_SET_SOCK: {
                struct file *file;
                if (nbd->file)
                        return -EBUSY;
                file = fget(arg);
                if (file) {
                        struct inode *inode = file_inode(file);
                        if (S_ISSOCK(inode->i_mode)) {
                                nbd->file = file;
                                nbd->sock = SOCKET_I(inode);
                                if (max_part > 0)
                                        bdev->bd_invalidated = 1;
                                nbd->disconnect = 0; /* we're connected now */
                                return 0;
                        } else {
                                fput(file);
                        }
                }
                return -EINVAL;
        }

        case NBD_SET_BLKSIZE:
                nbd->blksize = arg;
                nbd->bytesize &= ~(nbd->blksize-1);
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_SET_SIZE:
                nbd->bytesize = arg & ~(nbd->blksize-1);
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_SET_TIMEOUT:
                nbd->xmit_timeout = arg * HZ;
                return 0;

        case NBD_SET_FLAGS:
                nbd->flags = arg;
                return 0;

        case NBD_SET_SIZE_BLOCKS:
                nbd->bytesize = ((u64) arg) * nbd->blksize;
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_DO_IT: {
                struct task_struct *thread;
                struct file *file;
                int error;

                if (nbd->pid)
                        return -EBUSY;
                if (!nbd->file)
                        return -EINVAL;

                mutex_unlock(&nbd->tx_lock);

                if (nbd->flags & NBD_FLAG_READ_ONLY)
                        set_device_ro(bdev, true);
                if (nbd->flags & NBD_FLAG_SEND_TRIM)
                        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
                                nbd->disk->queue);
                if (nbd->flags & NBD_FLAG_SEND_FLUSH)
                        blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
                else
                        blk_queue_flush(nbd->disk->queue, 0);

                thread = kthread_create(nbd_thread, nbd, "%s",
                                        nbd->disk->disk_name);
                if (IS_ERR(thread)) {
                        mutex_lock(&nbd->tx_lock);
                        return PTR_ERR(thread);
                }
                wake_up_process(thread);
                error = nbd_do_it(nbd);
                kthread_stop(thread);

                mutex_lock(&nbd->tx_lock);
                if (error)
                        return error;
                sock_shutdown(nbd, 0);
                file = nbd->file;
                nbd->file = NULL;
                nbd_clear_que(nbd);
                dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
                kill_bdev(bdev);
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
                set_device_ro(bdev, false);
                if (file)
                        fput(file);
                nbd->flags = 0;
                nbd->bytesize = 0;
                bdev->bd_inode->i_size = 0;
                set_capacity(nbd->disk, 0);
                if (max_part > 0)
                        ioctl_by_bdev(bdev, BLKRRPART, 0);
                if (nbd->disconnect) /* user requested, ignore socket errors */
                        return 0;
                return nbd->harderror;
        }

        case NBD_CLEAR_QUE:
                /*
                 * This is for compatibility only.  The queue is always cleared
                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
                 */
                BUG_ON(!nbd->sock && !list_empty(&nbd->queue_head));
                return 0;

        case NBD_PRINT_DEBUG:
                dev_info(disk_to_dev(nbd->disk),
                        "next = %p, prev = %p, head = %p\n",
                        nbd->queue_head.next, nbd->queue_head.prev,
                        &nbd->queue_head);
                return 0;
        }
        return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct nbd_device *nbd = bdev->bd_disk->private_data;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        BUG_ON(nbd->magic != NBD_MAGIC);

        /* Anyone capable of this syscall can do *real bad* things */
        dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
                        nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

        mutex_lock(&nbd->tx_lock);
        error = __nbd_ioctl(bdev, nbd, cmd, arg);
        mutex_unlock(&nbd->tx_lock);

        return error;
}

static const struct block_device_operations nbd_fops =
{
        .owner =        THIS_MODULE,
        .ioctl =        nbd_ioctl,
};

/*
 * And here should be modules and kernel interface
 * (Just smiley confuses emacs :-)
 */

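/*
 * Module init: allocate nbds_max devices, give each a request queue and a
 * gendisk with 1 << part_shift minors, and register the NBD block major.
 */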
static int __init nbd_init(void)
{
        int err = -ENOMEM;
        int i;
        int part_shift;

        BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

        if (max_part < 0) {
                printk(KERN_ERR "nbd: max_part must be >= 0\n");
                return -EINVAL;
        }

        part_shift = 0;
        if (max_part > 0) {
                part_shift = fls(max_part);

                /*
                 * Adjust max_part according to part_shift as it is exported
                 * to user space so that user can know the max number of
                 * partition kernel should be able to manage.
                 *
                 * Note that -1 is required because partition 0 is reserved
                 * for the whole disk.
                 */
                max_part = (1UL << part_shift) - 1;
        }

        if ((1UL << part_shift) > DISK_MAX_PARTS)
                return -EINVAL;

        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;

        nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
        if (!nbd_dev)
                return -ENOMEM;

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = alloc_disk(1 << part_shift);
                if (!disk)
                        goto out;
                nbd_dev[i].disk = disk;
                /*
                 * The new linux 2.5 block layer implementation requires
                 * every gendisk to have its very own request_queue struct.
                 * These structs are big so we dynamically allocate them.
                 */
                disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
                if (!disk->queue) {
                        put_disk(disk);
                        goto out;
                }
                /*
                 * Tell the block layer that we are not a rotational device
                 */
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
                disk->queue->limits.discard_granularity = 512;
                disk->queue->limits.max_discard_sectors = UINT_MAX;
                disk->queue->limits.discard_zeroes_data = 0;
                blk_queue_max_hw_sectors(disk->queue, 65536);
                disk->queue->limits.max_sectors = 256;
        }

        if (register_blkdev(NBD_MAJOR, "nbd")) {
                err = -EIO;
                goto out;
        }

        printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
        dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].file = NULL;
                nbd_dev[i].magic = NBD_MAGIC;
                nbd_dev[i].flags = 0;
                INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
                INIT_LIST_HEAD(&nbd_dev[i].queue_head);
                mutex_init(&nbd_dev[i].tx_lock);
                init_waitqueue_head(&nbd_dev[i].active_wq);
                init_waitqueue_head(&nbd_dev[i].waiting_wq);
                nbd_dev[i].blksize = 1024;
                nbd_dev[i].bytesize = 0;
                disk->major = NBD_MAJOR;
                disk->first_minor = i << part_shift;
                disk->fops = &nbd_fops;
                disk->private_data = &nbd_dev[i];
                sprintf(disk->disk_name, "nbd%d", i);
                set_capacity(disk, 0);
                add_disk(disk);
        }

        return 0;
out:
        while (i--) {
                blk_cleanup_queue(nbd_dev[i].disk->queue);
                put_disk(nbd_dev[i].disk);
        }
        kfree(nbd_dev);
        return err;
}

static void __exit nbd_cleanup(void)
{
        int i;
        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].magic = 0;
                if (disk) {
                        del_gendisk(disk);
                        blk_cleanup_queue(disk->queue);
                        put_disk(disk);
                }
        }
        unregister_blkdev(NBD_MAJOR, "nbd");
        kfree(nbd_dev);
        printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
#ifndef NDEBUG
module_param(debugflags, int, 0644);
MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
#endif