1/*
2 * Copyright (C) 2009-2011 Red Hat, Inc.
3 *
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
5 *
6 * This file is released under the GPL.
7 */
8
9#ifndef DM_BUFIO_H
10#define DM_BUFIO_H
11
12#include <linux/blkdev.h>
13#include <linux/types.h>
14
15/*----------------------------------------------------------------*/
16
17struct dm_bufio_client;
18struct dm_buffer;
19
20/*
21 * Create a buffered IO cache on a given device
22 */
23struct dm_bufio_client *
24dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
25 unsigned reserved_buffers, unsigned aux_size,
26 void (*alloc_callback)(struct dm_buffer *),
27 void (*write_callback)(struct dm_buffer *));
28
29/*
30 * Release a buffered IO cache.
31 */
32void dm_bufio_client_destroy(struct dm_bufio_client *c);
33
400a0bef
MP
34/*
35 * Set the sector range.
36 * When this function is called, there must be no I/O in progress on the bufio
37 * client.
38 */
39void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
40
95d402f0
MP
41/*
42 * WARNING: to avoid deadlocks, these conditions are observed:
43 *
44 * - At most one thread can hold at most "reserved_buffers" simultaneously.
45 * - Each other threads can hold at most one buffer.
46 * - Threads which call only dm_bufio_get can hold unlimited number of
47 * buffers.
48 */
49
50/*
51 * Read a given block from disk. Returns pointer to data. Returns a
52 * pointer to dm_buffer that can be used to release the buffer or to make
53 * it dirty.
54 */
55void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
56 struct dm_buffer **bp);
57
58/*
59 * Like dm_bufio_read, but return buffer from cache, don't read
60 * it. If the buffer is not in the cache, return NULL.
61 */
62void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
63 struct dm_buffer **bp);
64
65/*
66 * Like dm_bufio_read, but don't read anything from the disk. It is
67 * expected that the caller initializes the buffer and marks it dirty.
68 */
69void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
70 struct dm_buffer **bp);
71
a66cc28f
MP
72/*
73 * Prefetch the specified blocks to the cache.
74 * The function starts to read the blocks and returns without waiting for
75 * I/O to finish.
76 */
77void dm_bufio_prefetch(struct dm_bufio_client *c,
78 sector_t block, unsigned n_blocks);
79
95d402f0
MP
80/*
81 * Release a reference obtained with dm_bufio_{read,get,new}. The data
82 * pointer and dm_buffer pointer is no longer valid after this call.
83 */
84void dm_bufio_release(struct dm_buffer *b);
85
86/*
87 * Mark a buffer dirty. It should be called after the buffer is modified.
88 *
89 * In case of memory pressure, the buffer may be written after
90 * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. So
91 * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but
92 * the actual writing may occur earlier.
93 */
94void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
95
96/*
97 * Initiate writing of dirty buffers, without waiting for completion.
98 */
99void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);
100
101/*
102 * Write all dirty buffers. Guarantees that all dirty buffers created prior
103 * to this call are on disk when this call exits.
104 */
105int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
106
107/*
108 * Send an empty write barrier to the device to flush hardware disk cache.
109 */
110int dm_bufio_issue_flush(struct dm_bufio_client *c);
111
112/*
113 * Like dm_bufio_release but also move the buffer to the new
114 * block. dm_bufio_write_dirty_buffers is needed to commit the new block.
115 */
116void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
117
55494bf2
MP
118/*
119 * Free the given buffer.
120 * This is just a hint, if the buffer is in use or dirty, this function
121 * does nothing.
122 */
123void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
124
55b082e6
MP
125/*
126 * Set the minimum number of buffers before cleanup happens.
127 */
128void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
129
95d402f0
MP
130unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
131sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
132sector_t dm_bufio_get_block_number(struct dm_buffer *b);
133void *dm_bufio_get_block_data(struct dm_buffer *b);
134void *dm_bufio_get_aux_data(struct dm_buffer *b);
135struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);
136
137/*----------------------------------------------------------------*/
138
139#endif