/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

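/*
 * Pick the more recent of two index block sequence numbers: zero is
 * treated as invalid, a tie returns @b, and otherwise the value that
 * is next in the nd_inc_seq() cycle wins.
 */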
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 index_span;

	if (ndd->nsindex_size)
		return ndd->nsindex_size;

	/*
	 * The minimum index space is 512 bytes, with that amount of
	 * index we can describe ~1400 labels which is less than a byte
	 * of overhead per label.  Round up to a byte of overhead per
	 * label and determine the size of the index region.  Yes, this
	 * starts to waste space at larger config_sizes, but it's
	 * unlikely we'll ever see anything but 128K.
	 */
	index_span = ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1);
	index_span /= NSINDEX_ALIGN * 2;
	ndd->nsindex_size = index_span * NSINDEX_ALIGN;

	return ndd->nsindex_size;
}

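/*
 * Upper bound on the number of label slots in the label storage area,
 * budgeting roughly one byte of index overhead per label (see
 * sizeof_namespace_index() above).
 */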
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	return ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1);
}

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * On media label format consists of two index blocks followed
	 * by an array of labels.  None of these structures are ever
	 * updated in place.  A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "%s: nsindex%d signature invalid\n",
					__func__, i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "%s: nsindex%d labelsize %d invalid\n",
					__func__, i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "%s: nsindex%d checksum invalid\n",
					__func__, i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "%s: nsindex%d sequence: %#x invalid\n",
					__func__, i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "%s: nsindex%d myoff: %#llx invalid\n",
					__func__, i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "%s: nsindex%d otheroff: %#llx invalid\n",
					__func__, i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "%s: nsindex%d mysize: %#llx invalid\n",
					__func__, i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "%s: nsindex%d nslot: %u invalid, config_size: %#x\n",
					__func__, i, nslot,
					ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}

int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
		struct nd_namespace_index *src)
{
	if (dst && src)
		/* pass */;
	else
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

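/*
 * Labels are stored immediately after the two index blocks.  The
 * helpers below convert between a label slot number and that label's
 * address within the in-memory copy of the label area.
 */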
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0); \
	     (bit) < (size); \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

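/* format the canonical "pmem-<uuid>" / "blk-<uuid>" id for a label set */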
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check that DPA allocations are page aligned */
	if ((__le64_to_cpu(nd_label->dpa)
				| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "%s fail checksum. slot: %d expect: %#llx\n",
					__func__, slot, sum);
			return false;
		}
	}

	return true;
}

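/*
 * Walk the active labels in the current index and reserve each label's
 * DPA range in the dimm's resource tree so that new allocations do not
 * collide with existing namespaces.
 */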
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

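/* number of labels in the current index that pass slot_valid() */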
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"%s: slot%d invalid slot: %d dpa: %llx size: %llx\n",
					__func__, slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

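/* return the nth valid label in the current index, or NULL */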
struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

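/*
 * Claim a free slot in the 'next' index's free-slot bitmap; returns
 * UINT_MAX if no index is available or every slot is in use.
 */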
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

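/* return a slot to the 'next' index's free-slot bitmap */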
bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

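/*
 * Number of free label slots per the 'next' index; before any index
 * has been written every slot is considered free.
 */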
u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}

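/*
 * Build the given index block (signature, sequence number, offsets,
 * and for ND_NSINDEX_INIT a fresh free-slot bitmap), checksum it,
 * write it to the dimm, and, unless initializing, rotate
 * ns_current / ns_next so the block just written becomes current.
 */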
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}

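/* byte offset of a label within the dimm's label storage area */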
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

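/* translate a label's abstraction guid into the claim class it represents */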
enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
		guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

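/*
 * Write an updated pmem namespace label for this mapping into a
 * freshly allocated slot, mark the previous label for the same uuid
 * as free, commit a new 'next' index, and fix up the in-memory label
 * tracking list.
 */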
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *victim = NULL;
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__);

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) != 0)
			continue;
		victim = label_ent;
		list_move_tail(&victim->list, &nd_mapping->labels);
		break;
	}
	if (victim) {
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
		slot = to_slot(ndd, victim->label);
		nd_label_free_slot(ndd, slot);
		victim->label = NULL;
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}

static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}

static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
			continue;
		return res;
	}

	return NULL;
}

/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	int min_dpa_idx = 0;
	LIST_HEAD(list);
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is
	 * disable and re-enable the parent region.
	 */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long),
				GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = to_label(ndd, slot);
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
					old_num_resources))
				continue;
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		kfree(victim_map);
		return -ENOSPC;
	}
	/* from here on we need to abort on error */

	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		res = nsblk->res[i];
		if (res->start < min->start)
			min_dpa_idx = i;
	}

	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX)
			goto abort;
		dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);

		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		if (nsblk->alt_name)
			memcpy(nd_label->name, nsblk->alt_name,
					NSLABEL_NAME_LEN);
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);

		/*
		 * Use the presence of the type_guid as a flag to
		 * determine isetcookie usage and nlabel + position
		 * policy for blk-aperture namespaces.
		 */
		if (namespace_label_has(ndd, type_guid)) {
			if (i == min_dpa_idx) {
				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
				nd_label->position = __cpu_to_le16(0);
			} else {
				nd_label->nlabel = __cpu_to_le16(0xffff);
				nd_label->position = __cpu_to_le16(0xffff);
			}
			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
		} else {
			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
			nd_label->position = __cpu_to_le16(0); /* N/A */
			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		}

		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);
		if (namespace_label_has(ndd, type_guid))
			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
		if (namespace_label_has(ndd, abstraction_guid))
			guid_copy(&nd_label->abstraction_guid,
					to_abstraction_guid(ndns->claim_class,
						&nd_label->abstraction_guid));

		if (namespace_label_has(ndd, checksum)) {
			u64 sum;

			nd_label->checksum = __cpu_to_le64(0);
			sum = nd_fletcher64(nd_label,
					sizeof_namespace_label(ndd), 1);
			nd_label->checksum = __cpu_to_le64(sum);
		}

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));
		if (rc < 0)
			goto abort;
	}

	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	nlabel = 0;
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;
		if (!nd_label)
			continue;
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);
	if (!label_ent) {
		WARN_ON(1);
		mutex_unlock(&nd_mapping->lock);
		rc = -ENXIO;
		goto out;
	}
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

 out:
	kfree(old_res_list);
	kfree(victim_map);
	return rc;

 abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}

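/*
 * Ensure nd_mapping->labels has at least @num_labels tracking entries
 * and that a valid pair of index blocks exists (writing initial index
 * blocks if the dimm has none).  Returns the number of label entries
 * provisioned, or a negative error code.
 */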
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

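/*
 * Free every label slot whose uuid matches @uuid, drop the
 * corresponding tracking entries, and commit the change by writing a
 * new 'next' index.
 */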
static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "%s: no more active labels\n", __func__);
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

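/*
 * Update (or delete, when @size is zero) the pmem namespace labels
 * across every mapping in the region.
 */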
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int rc, count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
		if (rc)
			return rc;
	}

	return 0;
}

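/*
 * Update (or delete, when @size is zero) the blk namespace labels; blk
 * namespaces only span the region's first mapping.
 */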
int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	return 0;
}