/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/types.h>
#include <linux/nd.h>
#include "label.h"

enum {
	/*
	 * Limits the maximum number of block apertures a dimm can
	 * support and is an input to the geometry/on-disk-format of a
	 * BTT instance
	 */
	ND_MAX_LANES = 256,
	SECTOR_SHIFT = 9,
	INT_LBASIZE_ALIGNMENT = 64,
	NVDIMM_IO_ATOMIC = 1,
};

struct nd_poison {
	u64 start;
	u64 length;
	struct list_head list;
};

struct nvdimm_drvdata {
	struct device *dev;
	int nsindex_size, nslabel_size;
	struct nd_cmd_get_config_size nsarea;
	void *data;
	int ns_current, ns_next;
	struct resource dpa;
	struct kref kref;
};

struct nd_region_data {
	int ns_count;
	int ns_active;
	unsigned int hints_shift;
	void __iomem *flush_wpq[0];
};

static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
		int dimm, int hint)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	return ndrd->flush_wpq[dimm * num + (hint & mask)];
}

static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
		int hint, void __iomem *flush)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
}
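
/*
 * Illustrative sketch (not part of the original header): flush_wpq is a
 * flat, dimm-major table holding (1 << hints_shift) write-pointer-queue
 * hint addresses per dimm, so the accessor pair above round-trips a
 * stored address. The helper name below is hypothetical.
 */
static inline void __iomem *example_flush_wpq_round_trip(
		struct nd_region_data *ndrd, int dimm, void __iomem *flush)
{
	/* store the hint-0 slot for @dimm, then read it back */
	ndrd_set_flush_wpq(ndrd, dimm, 0, flush);
	return ndrd_get_flush_wpq(ndrd, dimm, 0); /* == flush */
}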

static inline struct nd_namespace_index *to_namespace_index(
		struct nvdimm_drvdata *ndd, int i)
{
	if (i < 0)
		return NULL;

	return ndd->data + sizeof_namespace_index(ndd) * i;
}

static inline struct nd_namespace_index *to_current_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_current);
}

static inline struct nd_namespace_index *to_next_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_next);
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);

#define namespace_label_has(ndd, field) \
	(offsetof(struct nd_namespace_label, field) \
		< sizeof_namespace_label(ndd))
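
/*
 * Usage sketch (not part of the original header): fields added by newer
 * label specifications sit past the end of older, smaller labels, so
 * comparing a field's offset against the runtime label size reports
 * whether this dimm's labels carry the field. The helper name is
 * hypothetical; type_guid is assumed to be a v1.2 field of
 * struct nd_namespace_label in label.h.
 */
static inline bool example_labels_have_type_guid(struct nvdimm_drvdata *ndd)
{
	return namespace_label_has(ndd, type_guid);
}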

#define nd_dbg_dpa(r, d, res, fmt, arg...) \
	dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
		(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
		(unsigned long long) (res ? resource_size(res) : 0), \
		(unsigned long long) (res ? res->start : 0), ##arg)

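/*
 * Usage sketch (not part of the original header): nd_dbg_dpa() logs
 * against the region device when @r is non-NULL (prefixing the dimm
 * name), otherwise against the dimm device itself, e.g.:
 *
 *	nd_dbg_dpa(nd_region, ndd, res, "assign\n");
 *
 * which emits "<dimm-name>: <label-id>: <size> @ <start> assign".
 */
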
#define for_each_dpa_resource(ndd, res) \
	for (res = (ndd)->dpa.child; res; res = res->sibling)

#define for_each_dpa_resource_safe(ndd, res, next) \
	for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
			res; res = next, next = next ? next->sibling : NULL)
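
/*
 * Usage sketch (not part of the original header): the _safe variant
 * caches the sibling pointer up front so the current resource may be
 * released mid-walk, mirroring list_for_each_entry_safe():
 *
 *	struct resource *res, *next;
 *
 *	for_each_dpa_resource_safe(ndd, res, next)
 *		nvdimm_free_dpa(ndd, res);
 */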

struct nd_percpu_lane {
	int count;
	spinlock_t lock;
};

struct nd_label_ent {
	struct list_head list;
	struct nd_namespace_label *label;
};

enum nd_mapping_lock_class {
	ND_MAPPING_CLASS0,
	ND_MAPPING_UUID_SCAN,
};

struct nd_mapping {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
	struct list_head labels;
	struct mutex lock;
	/*
	 * @ndd is for private use at region enable / disable time for
	 * get_ndd() + put_ndd(); all other nd_mapping-to-ndd
	 * conversions use to_ndd(), which respects the enabled state
	 * of the nvdimm.
	 */
	struct nvdimm_drvdata *ndd;
};

struct nd_region {
	struct device dev;
	struct ida ns_ida;
	struct ida btt_ida;
	struct ida pfn_ida;
	struct ida dax_ida;
	unsigned long flags;
	struct device *ns_seed;
	struct device *btt_seed;
	struct device *pfn_seed;
	struct device *dax_seed;
	u16 ndr_mappings;
	u64 ndr_size;
	u64 ndr_start;
	int id, num_lanes, ro, numa_node;
	void *provider_data;
	struct kernfs_node *bb_state;
	struct badblocks bb;
	struct nd_interleave_set *nd_set;
	struct nd_percpu_lane __percpu *lane;
	struct nd_mapping mapping[0];
};

struct nd_blk_region {
	int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
	int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
			void *iobuf, u64 len, int rw);
	void *blk_provider_data;
	struct nd_region nd_region;
};

/*
 * Look up the next value in the repeating sequence 01, 10, 11.
 */
static inline unsigned nd_inc_seq(unsigned seq)
{
	static const unsigned next[] = { 0, 2, 3, 1 };

	return next[seq & 3];
}
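
/*
 * Illustrative note (not part of the original header): the cycle is
 * 1 -> 2 -> 3 -> 1 (binary 01 -> 10 -> 11), while 0 maps to 0 so an
 * uninitialized sequence number never enters the cycle:
 *
 *	nd_inc_seq(1) == 2, nd_inc_seq(2) == 3, nd_inc_seq(3) == 1
 */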

struct btt;
struct nd_btt {
	struct device dev;
	struct nd_namespace_common *ndns;
	struct btt *btt;
	unsigned long lbasize;
	u64 size;
	u8 *uuid;
	int id;
	int initial_offset;
	u16 version_major;
	u16 version_minor;
};

enum nd_pfn_mode {
	PFN_MODE_NONE,
	PFN_MODE_RAM,
	PFN_MODE_PMEM,
};

struct nd_pfn {
	int id;
	u8 *uuid;
	struct device dev;
	unsigned long align;
	unsigned long npfns;
	enum nd_pfn_mode mode;
	struct nd_pfn_sb *pfn_sb;
	struct nd_namespace_common *ndns;
};

struct nd_dax {
	struct nd_pfn nd_pfn;
};

enum nd_async_mode {
	ND_SYNC,
	ND_ASYNC,
};

int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len);
ssize_t nd_sector_size_show(unsigned long current_lbasize,
		const unsigned long *supported, char *buf);
ssize_t nd_sector_size_store(struct device *dev, const char *buf,
		unsigned long *current_lbasize, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len);
void nvdimm_set_aliasing(struct device *dev);
void nvdimm_set_locked(struct device *dev);
struct nd_btt *to_nd_btt(struct device *dev);

struct nd_gen_sb {
	char reserved[SZ_4K - 8];
	__le64 checksum;
};

u64 nd_sb_checksum(struct nd_gen_sb *sb);
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
static inline int nd_btt_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_btt(struct device *dev)
{
	return false;
}

static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)
int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern struct attribute_group nd_pfn_attribute_group;
#else
static inline int nd_pfn_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_pfn(struct device *dev)
{
	return false;
}

static inline struct device *nd_pfn_create(struct nd_region *nd_region)
{
	return NULL;
}

static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	return -ENODEV;
}
#endif

struct nd_dax *to_nd_dax(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_dax(struct device *dev)
{
	return false;
}

static inline struct device *nd_dax_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

struct nd_region *to_nd_region(struct device *dev);
int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
bool is_nvdimm_bus_locked(struct device *dev);
int nvdimm_revalidate_disk(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM)
struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct resource *res, struct vmem_altmap *altmap);
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct resource *res, struct vmem_altmap *altmap)
{
	return ERR_PTR(-ENXIO);
}
static inline int devm_nsio_enable(struct device *dev,
		struct nd_namespace_io *nsio)
{
	return -ENXIO;
}
static inline void devm_nsio_disable(struct device *dev,
		struct nd_namespace_io *nsio)
{
}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start);
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
{
	struct gendisk *disk = bio->bi_disk;

	if (!blk_queue_io_stat(disk->queue))
		return false;

	*start = jiffies;
	generic_start_io_acct(disk->queue, bio_data_dir(bio),
			bio_sectors(bio), &disk->part0);
	return true;
}
static inline void nd_iostat_end(struct bio *bio, unsigned long start)
{
	struct gendisk *disk = bio->bi_disk;

	generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0,
			start);
}
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
		unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}
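
/*
 * Usage sketch (not part of the original header): a pmem I/O path can
 * reject requests that overlap a known-bad range before touching media.
 * @len is in bytes; is_bad_pmem() converts it to 512-byte sectors. The
 * helper name is hypothetical.
 */
static inline int example_check_bad_range(struct badblocks *bb,
		sector_t sector, unsigned int len)
{
	return is_bad_pmem(bb, sector, len) ? -EIO : 0;
}
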
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */