/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@suse.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/types.h>

#include <linux/nbd.h>

#define LO_MAGIC 0x68797548

#ifdef NDEBUG
#define dprintk(flags, fmt...)
#else /* NDEBUG */
#define dprintk(flags, fmt...) do { \
        if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
} while (0)
#define DBG_IOCTL       0x0004
#define DBG_INIT        0x0010
#define DBG_EXIT        0x0020
#define DBG_BLKDEV      0x0100
#define DBG_RX          0x0200
#define DBG_TX          0x0400
static unsigned int debugflags;
#endif /* NDEBUG */

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);

#ifndef NDEBUG
static const char *ioctl_cmd_to_ascii(int cmd)
{
        switch (cmd) {
        case NBD_SET_SOCK: return "set-sock";
        case NBD_SET_BLKSIZE: return "set-blksize";
        case NBD_SET_SIZE: return "set-size";
        case NBD_DO_IT: return "do-it";
        case NBD_CLEAR_SOCK: return "clear-sock";
        case NBD_CLEAR_QUE: return "clear-que";
        case NBD_PRINT_DEBUG: return "print-debug";
        case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
        case NBD_DISCONNECT: return "disconnect";
        case BLKROSET: return "set-read-only";
        case BLKFLSBUF: return "flush-buffer-cache";
        }
        return "unknown";
}

static const char *nbdcmd_to_ascii(int cmd)
{
        switch (cmd) {
        case NBD_CMD_READ: return "read";
        case NBD_CMD_WRITE: return "write";
        case NBD_CMD_DISC: return "disconnect";
        }
        return "invalid";
}
#endif /* NDEBUG */

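/*
 * Complete a request. __blk_end_request_all() needs the queue lock, and we
 * are called from the receive/teardown paths where it is not held, so take
 * it here.
 */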
static void nbd_end_request(struct request *req)
{
        int error = req->errors ? -EIO : 0;
        struct request_queue *q = req->q;
        unsigned long flags;

        dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
                        req, error ? "failed" : "done");

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_end_request_all(req, error);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void sock_shutdown(struct nbd_device *lo, int lock)
{
        /* Forcibly shutdown the socket causing all listeners
         * to error
         *
         * FIXME: This code is duplicated from sys_shutdown, but
         * there should be a more generic interface rather than
         * calling socket ops directly here */
        if (lock)
                mutex_lock(&lo->tx_lock);
        if (lo->sock) {
                printk(KERN_WARNING "%s: shutting down socket\n",
                        lo->disk->disk_name);
                kernel_sock_shutdown(lo->sock, SHUT_RDWR);
                lo->sock = NULL;
        }
        if (lock)
                mutex_unlock(&lo->tx_lock);
}

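/*
 * Timer callback armed by sock_xmit() while a send is in flight: if the
 * transmit timeout expires, SIGKILL the transmitting task so the pending
 * signal check in sock_xmit() aborts the transfer.
 */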
static void nbd_xmit_timeout(unsigned long arg)
{
        struct task_struct *task = (struct task_struct *)arg;

        printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
                task->comm, task->pid);
        force_sig(SIGKILL, task);
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
                int msg_flags)
{
        struct socket *sock = lo->sock;
        int result;
        struct msghdr msg;
        struct kvec iov;
        sigset_t blocked, oldset;

        if (unlikely(!sock)) {
                printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
                       lo->disk->disk_name, (send ? "send" : "recv"));
                return -EINVAL;
        }

        /* Allow interception of SIGKILL only
         * Don't allow other signals to interrupt the transmission */
        siginitsetinv(&blocked, sigmask(SIGKILL));
        sigprocmask(SIG_SETMASK, &blocked, &oldset);

        do {
                sock->sk->sk_allocation = GFP_NOIO;
                iov.iov_base = buf;
                iov.iov_len = size;
                msg.msg_name = NULL;
                msg.msg_namelen = 0;
                msg.msg_control = NULL;
                msg.msg_controllen = 0;
                msg.msg_flags = msg_flags | MSG_NOSIGNAL;

                if (send) {
                        struct timer_list ti;

                        if (lo->xmit_timeout) {
                                init_timer(&ti);
                                ti.function = nbd_xmit_timeout;
                                ti.data = (unsigned long)current;
                                ti.expires = jiffies + lo->xmit_timeout;
                                add_timer(&ti);
                        }
                        result = kernel_sendmsg(sock, &msg, &iov, 1, size);
                        if (lo->xmit_timeout)
                                del_timer_sync(&ti);
                } else
                        result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);

                if (signal_pending(current)) {
                        siginfo_t info;
                        printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
                                task_pid_nr(current), current->comm,
                                dequeue_signal_lock(current, &current->blocked, &info));
                        result = -EINTR;
                        sock_shutdown(lo, !send);
                        break;
                }

                if (result <= 0) {
                        if (result == 0)
                                result = -EPIPE; /* short read */
                        break;
                }
                size -= result;
                buf += result;
        } while (size > 0);

        sigprocmask(SIG_SETMASK, &oldset, NULL);

        return result;
}

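/*
 * Map one bio_vec and push its bytes onto the socket; MSG_MORE is passed
 * through in @flags while further segments remain.
 */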
static inline int sock_send_bvec(struct nbd_device *lo, struct bio_vec *bvec,
                int flags)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(lo, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags);
        kunmap(bvec->bv_page);
        return result;
}

/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
        int result, flags;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);

        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(nbd_cmd(req));
        request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
        request.len = htonl(size);
        memcpy(request.handle, &req, sizeof(req));

        dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
                        lo->disk->disk_name, req,
                        nbdcmd_to_ascii(nbd_cmd(req)),
                        (unsigned long long)blk_rq_pos(req) << 9,
                        blk_rq_bytes(req));
        result = sock_xmit(lo, 1, &request, sizeof(request),
                        (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
        if (result <= 0) {
                printk(KERN_ERR "%s: Send control failed (result %d)\n",
                                lo->disk->disk_name, result);
                goto error_out;
        }

        if (nbd_cmd(req) == NBD_CMD_WRITE) {
                struct req_iterator iter;
                struct bio_vec *bvec;
                /*
                 * we are really probing at internals to determine
                 * whether to set MSG_MORE or not...
                 */
                rq_for_each_segment(bvec, req, iter) {
                        flags = 0;
                        if (!rq_iter_last(req, iter))
                                flags = MSG_MORE;
                        dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
                                        lo->disk->disk_name, req, bvec->bv_len);
                        result = sock_send_bvec(lo, bvec, flags);
                        if (result <= 0) {
                                printk(KERN_ERR "%s: Send data failed (result %d)\n",
                                                lo->disk->disk_name, result);
                                goto error_out;
                        }
                }
        }
        return 0;

error_out:
        return -EIO;
}

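/*
 * Match a server reply to the request it answers: wait until the request is
 * no longer the one currently being transmitted, then look it up on (and
 * remove it from) the device's list of in-flight requests.
 */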
static struct request *nbd_find_request(struct nbd_device *lo,
                                        struct request *xreq)
{
        struct request *req, *tmp;
        int err;

        err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
        if (unlikely(err))
                goto out;

        spin_lock(&lo->queue_lock);
        list_for_each_entry_safe(req, tmp, &lo->queue_head, queuelist) {
                if (req != xreq)
                        continue;
                list_del_init(&req->queuelist);
                spin_unlock(&lo->queue_lock);
                return req;
        }
        spin_unlock(&lo->queue_lock);

        err = -ENOENT;

out:
        return ERR_PTR(err);
}

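/* Receive one bio_vec worth of data from the server (counterpart of sock_send_bvec). */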
static inline int sock_recv_bvec(struct nbd_device *lo, struct bio_vec *bvec)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(lo, 0, kaddr + bvec->bv_offset, bvec->bv_len,
                        MSG_WAITALL);
        kunmap(bvec->bv_page);
        return result;
}

/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *lo)
{
        int result;
        struct nbd_reply reply;
        struct request *req;

        reply.magic = 0;
        result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL);
        if (result <= 0) {
                printk(KERN_ERR "%s: Receive control failed (result %d)\n",
                                lo->disk->disk_name, result);
                goto harderror;
        }

        if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
                printk(KERN_ERR "%s: Wrong magic (0x%lx)\n",
                                lo->disk->disk_name,
                                (unsigned long)ntohl(reply.magic));
                result = -EPROTO;
                goto harderror;
        }

        req = nbd_find_request(lo, *(struct request **)reply.handle);
        if (IS_ERR(req)) {
                result = PTR_ERR(req);
                if (result != -ENOENT)
                        goto harderror;

                printk(KERN_ERR "%s: Unexpected reply (%p)\n",
                                lo->disk->disk_name, reply.handle);
                result = -EBADR;
                goto harderror;
        }

        if (ntohl(reply.error)) {
                printk(KERN_ERR "%s: Other side returned error (%d)\n",
                                lo->disk->disk_name, ntohl(reply.error));
                req->errors++;
                return req;
        }

        dprintk(DBG_RX, "%s: request %p: got reply\n",
                        lo->disk->disk_name, req);
        if (nbd_cmd(req) == NBD_CMD_READ) {
                struct req_iterator iter;
                struct bio_vec *bvec;

                rq_for_each_segment(bvec, req, iter) {
                        result = sock_recv_bvec(lo, bvec);
                        if (result <= 0) {
                                printk(KERN_ERR "%s: Receive data failed (result %d)\n",
                                                lo->disk->disk_name, result);
                                req->errors++;
                                return req;
                        }
                        dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
                                lo->disk->disk_name, req, bvec->bv_len);
                }
        }
        return req;
harderror:
        lo->harderror = result;
        return NULL;
}

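/* sysfs attribute exposing the pid of the process driving this device (set while NBD_DO_IT runs). */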
static ssize_t pid_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);

        return sprintf(buf, "%ld\n",
                (long) ((struct nbd_device *)disk->private_data)->pid);
}

static struct device_attribute pid_attr = {
        .attr = { .name = "pid", .mode = S_IRUGO},
        .show = pid_show,
};

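/*
 * Receive loop run from the NBD_DO_IT ioctl: publish the caller's pid via
 * sysfs, then complete requests as replies arrive until the socket is shut
 * down or a hard error occurs.
 */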
static int nbd_do_it(struct nbd_device *lo)
{
        struct request *req;
        int ret;

        BUG_ON(lo->magic != LO_MAGIC);

        lo->pid = current->pid;
        ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
        if (ret) {
                printk(KERN_ERR "nbd: sysfs_create_file failed!");
                lo->pid = 0;
                return ret;
        }

        while ((req = nbd_read_stat(lo)) != NULL)
                nbd_end_request(req);

        sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
        lo->pid = 0;
        return 0;
}

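/* Fail and complete every request still sitting on the in-flight queue. */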
static void nbd_clear_que(struct nbd_device *lo)
{
        struct request *req;

        BUG_ON(lo->magic != LO_MAGIC);

        /*
         * Because we have set lo->sock to NULL under the tx_lock, all
         * modifications to the list must have completed by now. For
         * the same reason, the active_req must be NULL.
         *
         * As a consequence, we don't need to take the spin lock while
         * purging the list here.
         */
        BUG_ON(lo->sock);
        BUG_ON(lo->active_req);

        while (!list_empty(&lo->queue_head)) {
                req = list_entry(lo->queue_head.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                req->errors++;
                nbd_end_request(req);
        }
}

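/*
 * Transmit one block-layer request: translate it to an NBD command, send it
 * over the socket under tx_lock, and park it on queue_head until the
 * matching reply arrives; on any failure complete it with an error.
 */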
static void nbd_handle_req(struct nbd_device *lo, struct request *req)
{
        if (!blk_fs_request(req))
                goto error_out;

        nbd_cmd(req) = NBD_CMD_READ;
        if (rq_data_dir(req) == WRITE) {
                nbd_cmd(req) = NBD_CMD_WRITE;
                if (lo->flags & NBD_READ_ONLY) {
                        printk(KERN_ERR "%s: Write on read-only\n",
                                        lo->disk->disk_name);
                        goto error_out;
                }
        }

        req->errors = 0;

        mutex_lock(&lo->tx_lock);
        if (unlikely(!lo->sock)) {
                mutex_unlock(&lo->tx_lock);
                printk(KERN_ERR "%s: Attempted send on closed socket\n",
                       lo->disk->disk_name);
                goto error_out;
        }

        lo->active_req = req;

        if (nbd_send_req(lo, req) != 0) {
                printk(KERN_ERR "%s: Request send failed\n",
                                lo->disk->disk_name);
                req->errors++;
                nbd_end_request(req);
        } else {
                spin_lock(&lo->queue_lock);
                list_add(&req->queuelist, &lo->queue_head);
                spin_unlock(&lo->queue_lock);
        }

        lo->active_req = NULL;
        mutex_unlock(&lo->tx_lock);
        wake_up_all(&lo->active_wq);

        return;

error_out:
        req->errors++;
        nbd_end_request(req);
}

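/*
 * Per-device kernel thread: sleeps until do_nbd_request() queues work on
 * waiting_queue, then dequeues each request and hands it to nbd_handle_req()
 * outside the block layer's queue lock.
 */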
static int nbd_thread(void *data)
{
        struct nbd_device *lo = data;
        struct request *req;

        set_user_nice(current, -20);
        while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) {
                /* wait for something to do */
                wait_event_interruptible(lo->waiting_wq,
                                         kthread_should_stop() ||
                                         !list_empty(&lo->waiting_queue));

                /* extract request */
                if (list_empty(&lo->waiting_queue))
                        continue;

                spin_lock_irq(&lo->queue_lock);
                req = list_entry(lo->waiting_queue.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                spin_unlock_irq(&lo->queue_lock);

                /* handle request */
                nbd_handle_req(lo, req);
        }
        return 0;
}

/*
 * We always wait for result of write, for now. It would be nice to make it optional
 * in future
 * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
 * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */

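/*
 * Block-layer request function (called with queue_lock held): fetch each
 * request, drop the queue lock, and defer the actual network I/O to the
 * per-device nbd_thread via waiting_queue.
 */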
static void do_nbd_request(struct request_queue *q)
{
        struct request *req;

        while ((req = blk_fetch_request(q)) != NULL) {
                struct nbd_device *lo;

                spin_unlock_irq(q->queue_lock);

                dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
                                req->rq_disk->disk_name, req, req->cmd_type);

                lo = req->rq_disk->private_data;

                BUG_ON(lo->magic != LO_MAGIC);

                if (unlikely(!lo->sock)) {
                        printk(KERN_ERR "%s: Attempted send on closed socket\n",
                                lo->disk->disk_name);
                        req->errors++;
                        nbd_end_request(req);
                        spin_lock_irq(q->queue_lock);
                        continue;
                }

                spin_lock_irq(&lo->queue_lock);
                list_add_tail(&req->queuelist, &lo->waiting_queue);
                spin_unlock_irq(&lo->queue_lock);

                wake_up(&lo->waiting_wq);

                spin_lock_irq(q->queue_lock);
        }
}

/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
                       unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NBD_DISCONNECT: {
                struct request sreq;

                printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);

                blk_rq_init(NULL, &sreq);
                sreq.cmd_type = REQ_TYPE_SPECIAL;
                nbd_cmd(&sreq) = NBD_CMD_DISC;
                if (!lo->sock)
                        return -EINVAL;
                nbd_send_req(lo, &sreq);
                return 0;
        }

        case NBD_CLEAR_SOCK: {
                struct file *file;

                lo->sock = NULL;
                file = lo->file;
                lo->file = NULL;
                nbd_clear_que(lo);
                BUG_ON(!list_empty(&lo->queue_head));
                if (file)
                        fput(file);
                return 0;
        }

        case NBD_SET_SOCK: {
                struct file *file;
                if (lo->file)
                        return -EBUSY;
                file = fget(arg);
                if (file) {
                        struct inode *inode = file->f_path.dentry->d_inode;
                        if (S_ISSOCK(inode->i_mode)) {
                                lo->file = file;
                                lo->sock = SOCKET_I(inode);
                                if (max_part > 0)
                                        bdev->bd_invalidated = 1;
                                return 0;
                        } else {
                                fput(file);
                        }
                }
                return -EINVAL;
        }

        case NBD_SET_BLKSIZE:
                lo->blksize = arg;
                lo->bytesize &= ~(lo->blksize-1);
                bdev->bd_inode->i_size = lo->bytesize;
                set_blocksize(bdev, lo->blksize);
                set_capacity(lo->disk, lo->bytesize >> 9);
                return 0;

        case NBD_SET_SIZE:
                lo->bytesize = arg & ~(lo->blksize-1);
                bdev->bd_inode->i_size = lo->bytesize;
                set_blocksize(bdev, lo->blksize);
                set_capacity(lo->disk, lo->bytesize >> 9);
                return 0;

        case NBD_SET_TIMEOUT:
                lo->xmit_timeout = arg * HZ;
                return 0;

        case NBD_SET_SIZE_BLOCKS:
                lo->bytesize = ((u64) arg) * lo->blksize;
                bdev->bd_inode->i_size = lo->bytesize;
                set_blocksize(bdev, lo->blksize);
                set_capacity(lo->disk, lo->bytesize >> 9);
                return 0;

        case NBD_DO_IT: {
                struct task_struct *thread;
                struct file *file;
                int error;

                if (lo->pid)
                        return -EBUSY;
                if (!lo->file)
                        return -EINVAL;

                mutex_unlock(&lo->tx_lock);

                thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
                if (IS_ERR(thread)) {
                        mutex_lock(&lo->tx_lock);
                        return PTR_ERR(thread);
                }
                wake_up_process(thread);
                error = nbd_do_it(lo);
                kthread_stop(thread);

                mutex_lock(&lo->tx_lock);
                if (error)
                        return error;
                sock_shutdown(lo, 0);
                file = lo->file;
                lo->file = NULL;
                nbd_clear_que(lo);
                printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
                if (file)
                        fput(file);
                lo->bytesize = 0;
                bdev->bd_inode->i_size = 0;
                set_capacity(lo->disk, 0);
                if (max_part > 0)
                        ioctl_by_bdev(bdev, BLKRRPART, 0);
                return lo->harderror;
        }

        case NBD_CLEAR_QUE:
                /*
                 * This is for compatibility only.  The queue is always cleared
                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
                 */
                BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
                return 0;

        case NBD_PRINT_DEBUG:
                printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
                        bdev->bd_disk->disk_name,
                        lo->queue_head.next, lo->queue_head.prev,
                        &lo->queue_head);
                return 0;
        }
        return -ENOTTY;
}

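/*
 * ioctl entry point: privileged only (CAP_SYS_ADMIN); serializes all
 * configuration against the transmit path by taking tx_lock around
 * __nbd_ioctl().
 */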
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct nbd_device *lo = bdev->bd_disk->private_data;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        BUG_ON(lo->magic != LO_MAGIC);

        /* Anyone capable of this syscall can do *real bad* things */
        dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
                lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

        mutex_lock(&lo->tx_lock);
        error = __nbd_ioctl(bdev, lo, cmd, arg);
        mutex_unlock(&lo->tx_lock);

        return error;
}

static const struct block_device_operations nbd_fops =
{
        .owner =        THIS_MODULE,
        .locked_ioctl = nbd_ioctl,
};

/*
 * And here should be modules and kernel interface
 * (Just smiley confuses emacs :-)
 */

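/*
 * Module init: allocate nbds_max device structures, create a gendisk and
 * request queue for each, register the NBD major and add the disks
 * (initially with zero capacity, until userspace configures a socket).
 */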
static int __init nbd_init(void)
{
        int err = -ENOMEM;
        int i;
        int part_shift;

        BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

        if (max_part < 0) {
                printk(KERN_CRIT "nbd: max_part must be >= 0\n");
                return -EINVAL;
        }

        nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
        if (!nbd_dev)
                return -ENOMEM;

        part_shift = 0;
        if (max_part > 0)
                part_shift = fls(max_part);

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = alloc_disk(1 << part_shift);
                if (!disk)
                        goto out;
                nbd_dev[i].disk = disk;
                /*
                 * The new linux 2.5 block layer implementation requires
                 * every gendisk to have its very own request_queue struct.
                 * These structs are big so we dynamically allocate them.
                 */
                disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
                if (!disk->queue) {
                        put_disk(disk);
                        goto out;
                }
                /*
                 * Tell the block layer that we are not a rotational device
                 */
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
        }

        if (register_blkdev(NBD_MAJOR, "nbd")) {
                err = -EIO;
                goto out;
        }

        printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
        dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].file = NULL;
                nbd_dev[i].magic = LO_MAGIC;
                nbd_dev[i].flags = 0;
                INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
                INIT_LIST_HEAD(&nbd_dev[i].queue_head);
                mutex_init(&nbd_dev[i].tx_lock);
                init_waitqueue_head(&nbd_dev[i].active_wq);
                init_waitqueue_head(&nbd_dev[i].waiting_wq);
                nbd_dev[i].blksize = 1024;
                nbd_dev[i].bytesize = 0;
                disk->major = NBD_MAJOR;
                disk->first_minor = i << part_shift;
                disk->fops = &nbd_fops;
                disk->private_data = &nbd_dev[i];
                sprintf(disk->disk_name, "nbd%d", i);
                set_capacity(disk, 0);
                add_disk(disk);
        }

        return 0;
out:
        while (i--) {
                blk_cleanup_queue(nbd_dev[i].disk->queue);
                put_disk(nbd_dev[i].disk);
        }
        kfree(nbd_dev);
        return err;
}

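/* Module exit: tear down every disk and its queue, then drop the major number. */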
static void __exit nbd_cleanup(void)
{
        int i;
        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].magic = 0;
                if (disk) {
                        del_gendisk(disk);
                        blk_cleanup_queue(disk->queue);
                        put_disk(disk);
                }
        }
        unregister_blkdev(NBD_MAJOR, "nbd");
        kfree(nbd_dev);
        printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
#ifndef NDEBUG
module_param(debugflags, int, 0644);
MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
#endif