/*
 * Source: drivers/md/dm-cache-metadata.h
 * (extracted from a git blame view of mirror_ubuntu-artful-kernel.git
 *  at git.proxmox.com; blame annotations removed below)
 */
c6b4fcba
JT
1/*
2 * Copyright (C) 2012 Red Hat, Inc.
3 *
4 * This file is released under the GPL.
5 */
6
7#ifndef DM_CACHE_METADATA_H
8#define DM_CACHE_METADATA_H
9
10#include "dm-cache-block-types.h"
11#include "dm-cache-policy-internal.h"
895b47d7 12#include "persistent-data/dm-space-map-metadata.h"
c6b4fcba
JT
13
14/*----------------------------------------------------------------*/
15
895b47d7 16#define DM_CACHE_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE
c6b4fcba
JT
17
18/* FIXME: remove this restriction */
19/*
20 * The metadata device is currently limited in size.
c6b4fcba 21 */
895b47d7 22#define DM_CACHE_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS
c6b4fcba
JT
23
24/*
25 * A metadata device larger than 16GB triggers a warning.
26 */
27#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
28
29/*----------------------------------------------------------------*/
30
31/*
32 * Ext[234]-style compat feature flags.
33 *
34 * A new feature which old metadata will still be compatible with should
35 * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
36 *
37 * A new feature that is not compatible with old code should define a
38 * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
39 * that flag.
40 *
41 * A new feature that is not compatible with old code accessing the
42 * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
43 * guard the relevant code with that flag.
44 *
45 * As these various flags are defined they should be added to the
46 * following masks.
47 */
629d0a8a 48
c6b4fcba
JT
49#define DM_CACHE_FEATURE_COMPAT_SUPP 0UL
50#define DM_CACHE_FEATURE_COMPAT_RO_SUPP 0UL
51#define DM_CACHE_FEATURE_INCOMPAT_SUPP 0UL
52
53/*
629d0a8a
JT
54 * Reopens or creates a new, empty metadata volume. Returns an ERR_PTR on
55 * failure. If reopening then features must match.
c6b4fcba
JT
56 */
57struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
58 sector_t data_block_size,
59 bool may_format_device,
629d0a8a
JT
60 size_t policy_hint_size,
61 unsigned metadata_version);
c6b4fcba
JT
62
63void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
64
65/*
66 * The metadata needs to know how many cache blocks there are. We don't
67 * care about the origin, assuming the core target is giving us valid
68 * origin blocks to map to.
69 */
70int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
d14fcf3d 71int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
c6b4fcba
JT
72
73int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
74 sector_t discard_block_size,
1bad9bc4 75 dm_dblock_t new_nr_entries);
c6b4fcba
JT
76
77typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
1bad9bc4 78 dm_dblock_t dblock, bool discarded);
c6b4fcba
JT
79int dm_cache_load_discards(struct dm_cache_metadata *cmd,
80 load_discard_fn fn, void *context);
81
1bad9bc4 82int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
c6b4fcba
JT
83
84int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
85int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
86int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);
87
88typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
89 dm_cblock_t cblock, bool dirty,
90 uint32_t hint, bool hint_valid);
91int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
ea2dd8c1 92 struct dm_cache_policy *policy,
c6b4fcba
JT
93 load_mapping_fn fn,
94 void *context);
95
629d0a8a
JT
96int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
97 unsigned nr_bits, unsigned long *bits);
c6b4fcba
JT
98
99struct dm_cache_statistics {
100 uint32_t read_hits;
101 uint32_t read_misses;
102 uint32_t write_hits;
103 uint32_t write_misses;
104};
105
106void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
107 struct dm_cache_statistics *stats);
028ae9f7
JT
108
109/*
110 * 'void' because it's no big deal if it fails.
111 */
c6b4fcba
JT
112void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
113 struct dm_cache_statistics *stats);
114
115int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);
116
117int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
118 dm_block_t *result);
119
120int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
121 dm_block_t *result);
122
123void dm_cache_dump(struct dm_cache_metadata *cmd);
124
125/*
126 * The policy is invited to save a 32bit hint value for every cblock (eg,
127 * for a hit count). These are stored against the policy name. If
128 * policies are changed, then hints will be lost. If the machine crashes,
129 * hints will be lost.
130 *
131 * The hints are indexed by the cblock, but many policies will not
132 * neccessarily have a fast way of accessing efficiently via cblock. So
133 * rather than querying the policy for each cblock, we let it walk its data
134 * structures and fill in the hints in whatever order it wishes.
135 */
0596661f 136int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
c6b4fcba 137
2ee57d58
JT
138/*
139 * Query method. Are all the blocks in the cache clean?
140 */
141int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
142
d14fcf3d 143int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
028ae9f7
JT
144int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
145void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
146void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
147int dm_cache_metadata_abort(struct dm_cache_metadata *cmd);
148
c6b4fcba
JT
149/*----------------------------------------------------------------*/
150
151#endif /* DM_CACHE_METADATA_H */