/*
 * drivers/md/dm-cache-metadata.h
 * (source: android_kernel_motorola_exynos9610; page residue from the
 * gitweb blame view — commit "Using spinlock to protect interrupt flag" —
 * removed from the code below.)
 */
1/*
2 * Copyright (C) 2012 Red Hat, Inc.
3 *
4 * This file is released under the GPL.
5 */
6
7#ifndef DM_CACHE_METADATA_H
8#define DM_CACHE_METADATA_H
9
10#include "dm-cache-block-types.h"
11#include "dm-cache-policy-internal.h"
895b47d7 12#include "persistent-data/dm-space-map-metadata.h"
c6b4fcba
JT
13
14/*----------------------------------------------------------------*/
15
895b47d7 16#define DM_CACHE_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE
c6b4fcba
JT
17
18/* FIXME: remove this restriction */
19/*
20 * The metadata device is currently limited in size.
c6b4fcba 21 */
895b47d7 22#define DM_CACHE_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS
c6b4fcba
JT
23
24/*
25 * A metadata device larger than 16GB triggers a warning.
26 */
27#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
28
29/*----------------------------------------------------------------*/
30
31/*
32 * Ext[234]-style compat feature flags.
33 *
34 * A new feature which old metadata will still be compatible with should
35 * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
36 *
37 * A new feature that is not compatible with old code should define a
38 * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
39 * that flag.
40 *
41 * A new feature that is not compatible with old code accessing the
42 * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
43 * guard the relevant code with that flag.
44 *
45 * As these various flags are defined they should be added to the
46 * following masks.
47 */
629d0a8a 48
c6b4fcba
JT
49#define DM_CACHE_FEATURE_COMPAT_SUPP 0UL
50#define DM_CACHE_FEATURE_COMPAT_RO_SUPP 0UL
51#define DM_CACHE_FEATURE_INCOMPAT_SUPP 0UL
52
b29d4986
JT
53struct dm_cache_metadata;
54
c6b4fcba 55/*
629d0a8a
JT
56 * Reopens or creates a new, empty metadata volume. Returns an ERR_PTR on
57 * failure. If reopening then features must match.
c6b4fcba
JT
58 */
59struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
60 sector_t data_block_size,
61 bool may_format_device,
629d0a8a
JT
62 size_t policy_hint_size,
63 unsigned metadata_version);
c6b4fcba
JT
64
65void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
66
67/*
68 * The metadata needs to know how many cache blocks there are. We don't
69 * care about the origin, assuming the core target is giving us valid
70 * origin blocks to map to.
71 */
72int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
d14fcf3d 73int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
c6b4fcba
JT
74
75int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
76 sector_t discard_block_size,
1bad9bc4 77 dm_dblock_t new_nr_entries);
c6b4fcba
JT
78
79typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
1bad9bc4 80 dm_dblock_t dblock, bool discarded);
c6b4fcba
JT
81int dm_cache_load_discards(struct dm_cache_metadata *cmd,
82 load_discard_fn fn, void *context);
83
1bad9bc4 84int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
c6b4fcba
JT
85
86int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
87int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
88int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);
89
90typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
91 dm_cblock_t cblock, bool dirty,
92 uint32_t hint, bool hint_valid);
93int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
ea2dd8c1 94 struct dm_cache_policy *policy,
c6b4fcba
JT
95 load_mapping_fn fn,
96 void *context);
97
629d0a8a
JT
98int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
99 unsigned nr_bits, unsigned long *bits);
c6b4fcba
JT
100
101struct dm_cache_statistics {
102 uint32_t read_hits;
103 uint32_t read_misses;
104 uint32_t write_hits;
105 uint32_t write_misses;
106};
107
108void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
109 struct dm_cache_statistics *stats);
028ae9f7
JT
110
111/*
112 * 'void' because it's no big deal if it fails.
113 */
c6b4fcba
JT
114void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
115 struct dm_cache_statistics *stats);
116
117int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);
118
119int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
120 dm_block_t *result);
121
122int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
123 dm_block_t *result);
124
125void dm_cache_dump(struct dm_cache_metadata *cmd);
126
127/*
128 * The policy is invited to save a 32bit hint value for every cblock (eg,
129 * for a hit count). These are stored against the policy name. If
130 * policies are changed, then hints will be lost. If the machine crashes,
131 * hints will be lost.
132 *
133 * The hints are indexed by the cblock, but many policies will not
134 * neccessarily have a fast way of accessing efficiently via cblock. So
135 * rather than querying the policy for each cblock, we let it walk its data
136 * structures and fill in the hints in whatever order it wishes.
137 */
0596661f 138int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
c6b4fcba 139
2ee57d58
JT
140/*
141 * Query method. Are all the blocks in the cache clean?
142 */
143int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
144
d14fcf3d 145int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
028ae9f7
JT
146int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
147void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
148void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
149int dm_cache_metadata_abort(struct dm_cache_metadata *cmd);
150
c6b4fcba
JT
151/*----------------------------------------------------------------*/
152
153#endif /* DM_CACHE_METADATA_H */