/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include "common.h"
/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int xen_blkif_reqs = 64;
module_param_named(reqs, xen_blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);
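/*
 * Usage note (illustrative; assumes the driver is loaded as the xen-blkback
 * module): 'reqs' can only be set at load time, e.g.
 * "modprobe xen-blkback reqs=128", whereas 'log_stats' is run-time
 * switchable through the sysfs path mentioned above.
 */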
/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif	*blkif;
	u64			id;
	int			nr_pages;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
	DECLARE_BITMAP(unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
};
#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
	struct pending_req	*pending_reqs;
	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;
	/* The list of all pages that are available. */
	struct page		**pending_pages;
	/* And the grant handles that are available. */
	grant_handle_t		*pending_grant_handles;
};

static struct xen_blkbk *blkbk;
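/*
 * Sizing note: pending_reqs holds xen_blkif_reqs entries, while
 * pending_pages and pending_grant_handles each hold
 * xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST entries (one slot per
 * possible segment); see the allocations in xen_blkif_init() below.
 */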
/*
 * Maximum number of grant pages that can be mapped in blkback.
 * BLKIF_MAX_SEGMENTS_PER_REQUEST * RING_SIZE is the maximum number of
 * pages that blkback will persistently map.
 * Currently, this is:
 * RING_SIZE = 32 (for all known ring types)
 * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
 * sizeof(struct persistent_gnt) = 48
 * So the maximum memory used to store the grants is:
 * 32 * 11 * 48 = 16896 bytes
 */
static inline unsigned int max_mapped_grant_pages(enum blkif_protocol protocol)
{
	switch (protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		return __CONST_RING_SIZE(blkif, PAGE_SIZE) *
			   BLKIF_MAX_SEGMENTS_PER_REQUEST;
	case BLKIF_PROTOCOL_X86_32:
		return __CONST_RING_SIZE(blkif_x86_32, PAGE_SIZE) *
			   BLKIF_MAX_SEGMENTS_PER_REQUEST;
	case BLKIF_PROTOCOL_X86_64:
		return __CONST_RING_SIZE(blkif_x86_64, PAGE_SIZE) *
			   BLKIF_MAX_SEGMENTS_PER_REQUEST;
	}
	return 0;
}
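/*
 * Worked example, using the figures from the comment above: with 4 KiB
 * pages __CONST_RING_SIZE() evaluates to 32 for every protocol, so each
 * backend may persistently map at most 32 * 11 = 352 grant pages, and the
 * bookkeeping for those grants costs 352 * 48 = 16896 bytes.
 */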
/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index in the pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
	return (req - blkbk->pending_reqs) *
		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(struct pending_req *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
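/*
 * Indexing example: the pending_req at index 3 in blkbk->pending_reqs with
 * segment 5 maps to slot 3 * BLKIF_MAX_SEGMENTS_PER_REQUEST + 5 = 38 in
 * both pending_pages[] and pending_grant_handles[].
 */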
static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);
#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
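/*
 * foreach_grant_safe() caches rb_next() in 'n' before the loop body runs,
 * so the body may rb_erase() and kfree() the current node, as
 * free_persistent_gnts() below does.
 */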
static void add_persistent_gnt(struct rb_root *root,
			       struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct persistent_gnt *this;

	/* Figure out where to put new node */
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert(DRV_PFX " trying to add a gref that's already in the tree\n");
			BUG();
		}
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), root);
}
static struct persistent_gnt *get_persistent_gnt(struct rb_root *root,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = root->rb_node;

	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}
static void free_persistent_gnts(struct rb_root *root, unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int ret = 0;
	int segs_to_unmap = 0;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
			!rb_next(&persistent_gnt->node)) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
				segs_to_unmap);
			BUG_ON(ret);
			free_xenballooned_pages(segs_to_unmap, pages);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}
/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(void)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}
/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free page.
 */
static void free_req(struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}
/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}
static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn(DRV_PFX "Error starting transaction");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn(DRV_PFX "Error writing new size");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn(DRV_PFX "Error writing the state");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn(DRV_PFX "Error ending transaction");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}
/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}
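/*
 * The event-channel interrupt only records that work is pending and wakes
 * the per-device kthread; the ring itself is drained from
 * xen_blkif_schedule() below, not from interrupt context.
 */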
/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
	pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		 " | ds %4llu\n",
		 current->comm, blkif->st_oo_req,
		 blkif->st_rd_req, blkif->st_wr_req,
		 blkif->st_f_req, blkif->st_ds_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}
int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
		free_persistent_gnts(&blkif->persistent_gnts,
			blkif->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
	blkif->persistent_gnt_c = 0;

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}
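/*
 * Summary of the scheduler loop above: the thread sleeps until the
 * interrupt handler sets waiting_reqs and a pending_req is available,
 * drains the ring through do_block_io_op(), and on shutdown releases any
 * persistent grants it still holds before dropping its reference.
 */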
/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct pending_req *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		if (!test_bit(i, req->unmap_seg))
			continue;
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		pages[invcount] = virt_to_page(vaddr(req, i));
		invcount++;
	}

	ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
	BUG_ON(ret);
}
static int xen_blkbk_map(struct blkif_request *req,
			 struct pending_req *pending_req,
			 struct seg_buf seg[],
			 struct page *pages[])
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	struct xen_blkif *blkif = pending_req->blkif;
	phys_addr_t addr = 0;
	int i, j;
	bool new_map;
	int nseg = req->u.rw.nr_segments;
	int segs_to_map = 0;
	int ret = 0;
	int use_persistent_gnts;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	BUG_ON(blkif->persistent_gnt_c >
		   max_mapped_grant_pages(pending_req->blkif->blk_protocol));

	/*
	 * Fill out preq.nr_sects with proper amount of sectors, and setup
	 * assign map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		if (use_persistent_gnts)
			persistent_gnt = get_persistent_gnt(
				&blkif->persistent_gnts,
				req->u.rw.seg[i].gref);

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			new_map = false;
		} else if (use_persistent_gnts &&
			   blkif->persistent_gnt_c <
			   max_mapped_grant_pages(blkif->blk_protocol)) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we have room for it
			 */
			new_map = true;
			persistent_gnt = kmalloc(
				sizeof(struct persistent_gnt),
				GFP_KERNEL);
			if (!persistent_gnt)
				return -ENOMEM;
			if (alloc_xenballooned_pages(1, &persistent_gnt->page,
			    false)) {
				kfree(persistent_gnt);
				return -ENOMEM;
			}
			persistent_gnt->gnt = req->u.rw.seg[i].gref;
			persistent_gnt->handle = BLKBACK_INVALID_HANDLE;

			pages_to_gnt[segs_to_map] =
				persistent_gnt->page;
			addr = (unsigned long) pfn_to_kaddr(
				page_to_pfn(persistent_gnt->page));

			add_persistent_gnt(&blkif->persistent_gnts,
				persistent_gnt);
			blkif->persistent_gnt_c++;
			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, blkif->persistent_gnt_c,
				 max_mapped_grant_pages(blkif->blk_protocol));
		} else {
			/*
			 * We are either using persistent grants and
			 * hit the maximum limit of grants mapped,
			 * or we are not using persistent grants.
			 */
			if (use_persistent_gnts &&
				!blkif->vbd.overflow_max_grants) {
				blkif->vbd.overflow_max_grants = 1;
				pr_alert(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
					 blkif->domid, blkif->vbd.handle);
			}
			new_map = true;
			pages[i] = blkbk->pending_page(pending_req, i);
			addr = vaddr(pending_req, i);
			pages_to_gnt[segs_to_map] =
				blkbk->pending_page(pending_req, i);
		}

		if (persistent_gnt) {
			pages[i] = persistent_gnt->page;
			persistent_gnts[i] = persistent_gnt;
		} else {
			persistent_gnts[i] = NULL;
		}

		if (new_map) {
			flags = GNTMAP_host_map;
			if (!persistent_gnt &&
			    (pending_req->operation != BLKIF_OP_READ))
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, req->u.rw.seg[i].gref,
					  blkif->domid);
		}
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	bitmap_zero(pending_req->unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
	for (i = 0, j = 0; i < nseg; i++) {
		if (!persistent_gnts[i] ||
		    persistent_gnts[i]->handle == BLKBACK_INVALID_HANDLE) {
			/* This is a newly mapped grant */
			BUG_ON(j >= segs_to_map);
			if (unlikely(map[j].status != 0)) {
				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
				map[j].handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				if (persistent_gnts[i]) {
					rb_erase(&persistent_gnts[i]->node,
						 &blkif->persistent_gnts);
					blkif->persistent_gnt_c--;
					kfree(persistent_gnts[i]);
					persistent_gnts[i] = NULL;
				}
			}
		}
		if (persistent_gnts[i]) {
			if (persistent_gnts[i]->handle ==
			    BLKBACK_INVALID_HANDLE) {
				/*
				 * If this is a new persistent grant
				 * save the handle
				 */
				persistent_gnts[i]->handle = map[j++].handle;
			}
			pending_handle(pending_req, i) =
				persistent_gnts[i]->handle;

			if (ret)
				continue;
		} else {
			pending_handle(pending_req, i) = map[j++].handle;
			bitmap_set(pending_req->unmap_seg, i, 1);

			if (ret)
				continue;
		}
		seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
	}
	return ret;
}
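/*
 * Summary of the mapping policy above: a grant already in the red-black
 * tree is reused as-is; a new grant is mapped persistently while the
 * backend is below max_mapped_grant_pages(); otherwise it falls back to a
 * regular map through pending_pages[] and is flagged in unmap_seg so that
 * xen_blkbk_unmap() tears it down when the request completes.
 */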
static int dispatch_discard_io(struct xen_blkif *blkif,
				struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	xen_blkif_get(blkif);

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, WRITE);
	if (err) {
		pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	blkif->st_ds_req++;

	secure = (blkif->vbd.discard_secure &&
		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug(DRV_PFX "discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(blkif, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}
static int dispatch_other_io(struct xen_blkif *blkif,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(pending_req);
	make_response(blkif, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		/* The initial value is one, and one refcnt taken at the
		 * start of the xen_blkif_schedule thread. */
		if (atomic_read(&blkif->refcnt) <= 2)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}
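/*
 * Draining works by waiting until the refcount drops back to the two
 * references noted above, i.e. until every in-flight request has called
 * xen_blkif_put() from __end_block_io_op(). dispatch_rw_block_io() uses
 * this before issuing the WRITE_FLUSH for a BLKIF_OP_WRITE_BARRIER request.
 */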
/*
 * Completion callback on the bio's. Called as bh->b_end_io()
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
		    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
			if (atomic_read(&pending_req->blkif->drain))
				complete(&pending_req->blkif->drain_complete);
		}
		free_req(pending_req);
	}
}
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}
/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
			if (dispatch_rw_block_io(blkif, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(pending_req);
			if (dispatch_discard_io(blkif, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(blkif, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}

static int
do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
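/*
 * RING_FINAL_CHECK_FOR_REQUESTS() re-arms req_event and then re-checks the
 * ring, so a request that the frontend queued after the last scan (but
 * before this thread goes back to sleep) is not missed.
 */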
/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 * and call the 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;
	bool drain = false;
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];

	switch (req->operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
			 nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	for (i = 0; i < nseg; i++) {
		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;
		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;
	}

	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
				 blkif->domid);
			goto fail_response;
		}
	}

	/* Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map(req, pending_req, seg, pages))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i],
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(pending_req);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}
/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response  resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}
static int __init xen_blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs          = kzalloc(sizeof(blkbk->pending_reqs[0]) *
					xen_blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
					mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < xen_blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
		kfree(blkbk->pending_pages);
	}
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");