tools/testing/nvdimm/test/nfit.c
1 /*
2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/libnvdimm.h>
17 #include <linux/vmalloc.h>
18 #include <linux/device.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/ndctl.h>
22 #include <linux/sizes.h>
23 #include <linux/list.h>
24 #include <linux/slab.h>
25 #include <nfit.h>
26 #include <nd.h>
27 #include "nfit_test.h"
28
29 /*
30 * Generate an NFIT table to describe the following topology:
31 *
32 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
33 *
34 * (a) (b) DIMM BLK-REGION
35 * +----------+--------------+----------+---------+
36 * +------+ | blk2.0 | pm0.0 | blk2.1 | pm1.0 | 0 region2
37 * | imc0 +--+- - - - - region0 - - - -+----------+ +
38 * +--+---+ | blk3.0 | pm0.0 | blk3.1 | pm1.0 | 1 region3
39 * | +----------+--------------v----------v v
40 * +--+---+ | |
41 * | cpu0 | region1
42 * +--+---+ | |
43 * | +-------------------------^----------^ ^
44 * +--+---+ | blk4.0 | pm1.0 | 2 region4
45 * | imc1 +--+-------------------------+----------+ +
46 * +------+ | blk5.0 | pm1.0 | 3 region5
47 * +-------------------------+----------+-+-------+
48 *
49 * +--+---+
50 * | cpu1 |
51 * +--+---+ (Hotplug DIMM)
52 * | +----------------------------------------------+
53 * +--+---+ | blk6.0/pm7.0 | 4 region6/7
54 * | imc0 +--+----------------------------------------------+
55 * +------+
56 *
57 *
58 * *) In this layout we have four dimms and two memory controllers in one
59 * socket. Each unique interface (BLK or PMEM) to DPA space
60 * is identified by a region device with a dynamically assigned id.
61 *
62 * *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
63 * A single PMEM namespace "pm0.0" is created using half of the
64 * REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespaces
65 * allocate from the bottom of a region. The unallocated
66 * portion of REGION0 aliases with REGION2 and REGION3. That
67 * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
68 * "blk3.0") starting at the base of each DIMM to offset (a) in those
69 * DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable
70 * names that can be assigned to a namespace.
71 *
72 * *) In the last portion of dimm0 and dimm1 we have an interleaved
73 * SPA range, REGION1, that spans those two dimms as well as dimm2
74 * and dimm3. Some of REGION1 is allocated to a PMEM namespace named
75 * "pm1.0"; the rest is reclaimed as 4 BLK namespaces (for each
76 * dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
77 * "blk5.0".
78 *
79 * *) The portions of dimm2 and dimm3 that do not participate in the
80 * REGION1 interleaved SPA range (i.e. the DPA addresses below offset
81 * (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
82 * Note that BLK namespaces need not be contiguous in DPA-space, and
83 * can consume aliased capacity from multiple interleave sets.
84 *
85 * BUS1: Legacy NVDIMM (single contiguous range)
86 *
87 * region2
88 * +---------------------+
89 * |---------------------|
90 * || pm2.0 ||
91 * |---------------------|
92 * +---------------------+
93 *
94 * *) An NFIT table may describe a simple system-physical-address range
95 * with no BLK aliasing. This type of region may optionally
96 * reference an NVDIMM.
97 */
98 enum {
99 NUM_PM = 3,
100 NUM_DCR = 5,
101 NUM_BDW = NUM_DCR,
102 NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
103 NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
104 DIMM_SIZE = SZ_32M,
105 LABEL_SIZE = SZ_128K,
106 SPA0_SIZE = DIMM_SIZE,
107 SPA1_SIZE = DIMM_SIZE*2,
108 SPA2_SIZE = DIMM_SIZE,
109 BDW_SIZE = 64 << 8,
110 DCR_SIZE = 12,
111 NUM_NFITS = 2, /* permit testing multiple NFITs per system */
112 };
113
114 struct nfit_test_dcr {
115 __le64 bdw_addr;
116 __le32 bdw_status;
117 __u8 aperture[BDW_SIZE];
118 };
119
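/*
 * Pack an NFIT device handle: node id in bits 27:16, socket in bits
 * 15:12, memory controller in bits 11:8, channel in bits 7:4 and DIMM
 * in bits 3:0 (matching the shifts in the macro below).
 */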
120 #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
121 (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
122 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
123
124 static u32 handle[NUM_DCR] = {
125 [0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
126 [1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
127 [2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
128 [3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
129 [4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
130 };
131
132 struct nfit_test {
133 struct acpi_nfit_desc acpi_desc;
134 struct platform_device pdev;
135 struct list_head resources;
136 void *nfit_buf;
137 dma_addr_t nfit_dma;
138 size_t nfit_size;
139 int num_dcr;
140 int num_pm;
141 void **dimm;
142 dma_addr_t *dimm_dma;
143 void **flush;
144 dma_addr_t *flush_dma;
145 void **label;
146 dma_addr_t *label_dma;
147 void **spa_set;
148 dma_addr_t *spa_set_dma;
149 struct nfit_test_dcr **dcr;
150 dma_addr_t *dcr_dma;
151 int (*alloc)(struct nfit_test *t);
152 void (*setup)(struct nfit_test *t);
153 int setup_hotplug;
154 };
155
156 static struct nfit_test *to_nfit_test(struct device *dev)
157 {
158 struct platform_device *pdev = to_platform_device(dev);
159
160 return container_of(pdev, struct nfit_test, pdev);
161 }
162
163 static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
164 unsigned int buf_len)
165 {
166 if (buf_len < sizeof(*nd_cmd))
167 return -EINVAL;
168
169 nd_cmd->status = 0;
170 nd_cmd->config_size = LABEL_SIZE;
171 nd_cmd->max_xfer = SZ_4K;
172
173 return 0;
174 }
175
176 static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
177 *nd_cmd, unsigned int buf_len, void *label)
178 {
179 unsigned int len, offset = nd_cmd->in_offset;
180 int rc;
181
182 if (buf_len < sizeof(*nd_cmd))
183 return -EINVAL;
184 if (offset >= LABEL_SIZE)
185 return -EINVAL;
186 if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
187 return -EINVAL;
188
189 nd_cmd->status = 0;
190 len = min(nd_cmd->in_length, LABEL_SIZE - offset);
191 memcpy(nd_cmd->out_buf, label + offset, len);
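/* report the residual buffer space beyond the copied payload */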
192 rc = buf_len - sizeof(*nd_cmd) - len;
193
194 return rc;
195 }
196
197 static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
198 unsigned int buf_len, void *label)
199 {
200 unsigned int len, offset = nd_cmd->in_offset;
201 u32 *status;
202 int rc;
203
204 if (buf_len < sizeof(*nd_cmd))
205 return -EINVAL;
206 if (offset >= LABEL_SIZE)
207 return -EINVAL;
208 if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
209 return -EINVAL;
210
211 status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
212 *status = 0;
213 len = min(nd_cmd->in_length, LABEL_SIZE - offset);
214 memcpy(label + offset, nd_cmd->in_buf, len);
215 rc = buf_len - sizeof(*nd_cmd) - (len + 4);
216
217 return rc;
218 }
219
220 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
221 unsigned int buf_len)
222 {
223 if (buf_len < sizeof(*nd_cmd))
224 return -EINVAL;
225
226 nd_cmd->max_ars_out = 256;
227 nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
228
229 return 0;
230 }
231
232 static int nfit_test_cmd_ars_start(struct nd_cmd_ars_start *nd_cmd,
233 unsigned int buf_len)
234 {
235 if (buf_len < sizeof(*nd_cmd))
236 return -EINVAL;
237
238 nd_cmd->status = 0;
239
240 return 0;
241 }
242
243 static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd,
244 unsigned int buf_len)
245 {
246 if (buf_len < sizeof(*nd_cmd))
247 return -EINVAL;
248
249 nd_cmd->out_length = 256;
250 nd_cmd->num_records = 0;
251 nd_cmd->address = 0;
252 nd_cmd->length = -1ULL;
253 nd_cmd->status = 0;
254
255 return 0;
256 }
257
258 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
259 struct nvdimm *nvdimm, unsigned int cmd, void *buf,
260 unsigned int buf_len)
261 {
262 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
263 struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
264 int i, rc = 0;
265
266 if (nvdimm) {
267 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
268
269 if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
270 return -ENOTTY;
271
272 /* lookup label space for the given dimm */
273 for (i = 0; i < ARRAY_SIZE(handle); i++)
274 if (__to_nfit_memdev(nfit_mem)->device_handle ==
275 handle[i])
276 break;
277 if (i >= ARRAY_SIZE(handle))
278 return -ENXIO;
279
280 switch (cmd) {
281 case ND_CMD_GET_CONFIG_SIZE:
282 rc = nfit_test_cmd_get_config_size(buf, buf_len);
283 break;
284 case ND_CMD_GET_CONFIG_DATA:
285 rc = nfit_test_cmd_get_config_data(buf, buf_len,
286 t->label[i]);
287 break;
288 case ND_CMD_SET_CONFIG_DATA:
289 rc = nfit_test_cmd_set_config_data(buf, buf_len,
290 t->label[i]);
291 break;
292 default:
293 return -ENOTTY;
294 }
295 } else {
296 if (!nd_desc || !test_bit(cmd, &nd_desc->dsm_mask))
297 return -ENOTTY;
298
299 switch (cmd) {
300 case ND_CMD_ARS_CAP:
301 rc = nfit_test_cmd_ars_cap(buf, buf_len);
302 break;
303 case ND_CMD_ARS_START:
304 rc = nfit_test_cmd_ars_start(buf, buf_len);
305 break;
306 case ND_CMD_ARS_STATUS:
307 rc = nfit_test_cmd_ars_status(buf, buf_len);
308 break;
309 default:
310 return -ENOTTY;
311 }
312 }
313
314 return rc;
315 }
316
317 static DEFINE_SPINLOCK(nfit_test_lock);
318 static struct nfit_test *instances[NUM_NFITS];
319
320 static void release_nfit_res(void *data)
321 {
322 struct nfit_test_resource *nfit_res = data;
323 struct resource *res = nfit_res->res;
324
325 spin_lock(&nfit_test_lock);
326 list_del(&nfit_res->list);
327 spin_unlock(&nfit_test_lock);
328
329 if (is_vmalloc_addr(nfit_res->buf))
330 vfree(nfit_res->buf);
331 else
332 dma_free_coherent(nfit_res->dev, resource_size(res),
333 nfit_res->buf, res->start);
334 kfree(res);
335 kfree(nfit_res);
336 }
337
338 static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
339 void *buf)
340 {
341 struct device *dev = &t->pdev.dev;
342 struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
343 struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
344 GFP_KERNEL);
345 int rc;
346
347 if (!res || !buf || !nfit_res)
348 goto err;
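/* register release_nfit_res() so the buffers are torn down with the device */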
349 rc = devm_add_action(dev, release_nfit_res, nfit_res);
350 if (rc)
351 goto err;
352 INIT_LIST_HEAD(&nfit_res->list);
353 memset(buf, 0, size);
354 nfit_res->dev = dev;
355 nfit_res->buf = buf;
356 nfit_res->res = res;
357 res->start = *dma;
358 res->end = *dma + size - 1;
359 res->name = "NFIT";
360 spin_lock(&nfit_test_lock);
361 list_add(&nfit_res->list, &t->resources);
362 spin_unlock(&nfit_test_lock);
363
364 return nfit_res->buf;
365 err:
366 if (buf && !is_vmalloc_addr(buf))
367 dma_free_coherent(dev, size, buf, *dma);
368 else if (buf)
369 vfree(buf);
370 kfree(res);
371 kfree(nfit_res);
372 return NULL;
373 }
374
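/*
 * vmalloc()-backed allocation: the buffer's kernel virtual address
 * doubles as the fake DMA / system-physical address advertised in the
 * NFIT, so nfit_test_lookup() can translate it back to the buffer.
 */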
375 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
376 {
377 void *buf = vmalloc(size);
378
379 *dma = (unsigned long) buf;
380 return __test_alloc(t, size, dma, buf);
381 }
382
383 static void *test_alloc_coherent(struct nfit_test *t, size_t size,
384 dma_addr_t *dma)
385 {
386 struct device *dev = &t->pdev.dev;
387 void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
388
389 return __test_alloc(t, size, dma, buf);
390 }
391
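/*
 * Translate an address back to the emulated resource that backs it,
 * matching either the advertised (fake) SPA range or the buffer's
 * virtual address.  This is handed to the test harness via
 * nfit_test_setup() in nfit_test_init() below.
 */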
392 static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
393 {
394 int i;
395
396 for (i = 0; i < ARRAY_SIZE(instances); i++) {
397 struct nfit_test_resource *n, *nfit_res = NULL;
398 struct nfit_test *t = instances[i];
399
400 if (!t)
401 continue;
402 spin_lock(&nfit_test_lock);
403 list_for_each_entry(n, &t->resources, list) {
404 if (addr >= n->res->start && (addr < n->res->start
405 + resource_size(n->res))) {
406 nfit_res = n;
407 break;
408 } else if (addr >= (unsigned long) n->buf
409 && (addr < (unsigned long) n->buf
410 + resource_size(n->res))) {
411 nfit_res = n;
412 break;
413 }
414 }
415 spin_unlock(&nfit_test_lock);
416 if (nfit_res)
417 return nfit_res;
418 }
419
420 return NULL;
421 }
422
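/*
 * Back every BUS0 resource described in the topology comment above with
 * real memory: the NFIT buffer itself, the interleaved SPA ranges, and
 * per-DIMM storage, label, flush-hint and control-region areas.
 */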
423 static int nfit_test0_alloc(struct nfit_test *t)
424 {
425 size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
426 + sizeof(struct acpi_nfit_memory_map) * NUM_MEM
427 + sizeof(struct acpi_nfit_control_region) * NUM_DCR
428 + sizeof(struct acpi_nfit_data_region) * NUM_BDW
429 + sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
430 int i;
431
432 t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
433 if (!t->nfit_buf)
434 return -ENOMEM;
435 t->nfit_size = nfit_size;
436
437 t->spa_set[0] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[0]);
438 if (!t->spa_set[0])
439 return -ENOMEM;
440
441 t->spa_set[1] = test_alloc_coherent(t, SPA1_SIZE, &t->spa_set_dma[1]);
442 if (!t->spa_set[1])
443 return -ENOMEM;
444
445 t->spa_set[2] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[2]);
446 if (!t->spa_set[2])
447 return -ENOMEM;
448
449 for (i = 0; i < NUM_DCR; i++) {
450 t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
451 if (!t->dimm[i])
452 return -ENOMEM;
453
454 t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
455 if (!t->label[i])
456 return -ENOMEM;
457 sprintf(t->label[i], "label%d", i);
458
459 t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
460 if (!t->flush[i])
461 return -ENOMEM;
462 }
463
464 for (i = 0; i < NUM_DCR; i++) {
465 t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
466 if (!t->dcr[i])
467 return -ENOMEM;
468 }
469
470 return 0;
471 }
472
473 static int nfit_test1_alloc(struct nfit_test *t)
474 {
475 size_t nfit_size = sizeof(struct acpi_nfit_system_address)
476 + sizeof(struct acpi_nfit_memory_map)
477 + sizeof(struct acpi_nfit_control_region);
478
479 t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
480 if (!t->nfit_buf)
481 return -ENOMEM;
482 t->nfit_size = nfit_size;
483
484 t->spa_set[0] = test_alloc_coherent(t, SPA2_SIZE, &t->spa_set_dma[0]);
485 if (!t->spa_set[0])
486 return -ENOMEM;
487
488 return 0;
489 }
490
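/*
 * Emit the BUS0 NFIT entries in order: SPA ranges, memory-map entries,
 * control regions, block-data-windows and flush hints, plus the hotplug
 * DIMM's entries when t->setup_hotplug is set.
 */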
491 static void nfit_test0_setup(struct nfit_test *t)
492 {
493 struct nvdimm_bus_descriptor *nd_desc;
494 struct acpi_nfit_desc *acpi_desc;
495 struct acpi_nfit_memory_map *memdev;
496 void *nfit_buf = t->nfit_buf;
497 struct acpi_nfit_system_address *spa;
498 struct acpi_nfit_control_region *dcr;
499 struct acpi_nfit_data_region *bdw;
500 struct acpi_nfit_flush_address *flush;
501 unsigned int offset;
502
503 /*
504 * spa0 (interleave first half of dimm0 and dimm1, note storage
505 * does not actually alias the related block-data-window
506 * regions)
507 */
508 spa = nfit_buf;
509 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
510 spa->header.length = sizeof(*spa);
511 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
512 spa->range_index = 0+1;
513 spa->address = t->spa_set_dma[0];
514 spa->length = SPA0_SIZE;
515
516 /*
517 * spa1 (interleave last half of the 4 DIMMs, note storage
518 * does not actually alias the related block-data-window
519 * regions)
520 */
521 spa = nfit_buf + sizeof(*spa);
522 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
523 spa->header.length = sizeof(*spa);
524 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
525 spa->range_index = 1+1;
526 spa->address = t->spa_set_dma[1];
527 spa->length = SPA1_SIZE;
528
529 /* spa2 (dcr0) dimm0 */
530 spa = nfit_buf + sizeof(*spa) * 2;
531 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
532 spa->header.length = sizeof(*spa);
533 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
534 spa->range_index = 2+1;
535 spa->address = t->dcr_dma[0];
536 spa->length = DCR_SIZE;
537
538 /* spa3 (dcr1) dimm1 */
539 spa = nfit_buf + sizeof(*spa) * 3;
540 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
541 spa->header.length = sizeof(*spa);
542 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
543 spa->range_index = 3+1;
544 spa->address = t->dcr_dma[1];
545 spa->length = DCR_SIZE;
546
547 /* spa4 (dcr2) dimm2 */
548 spa = nfit_buf + sizeof(*spa) * 4;
549 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
550 spa->header.length = sizeof(*spa);
551 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
552 spa->range_index = 4+1;
553 spa->address = t->dcr_dma[2];
554 spa->length = DCR_SIZE;
555
556 /* spa5 (dcr3) dimm3 */
557 spa = nfit_buf + sizeof(*spa) * 5;
558 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
559 spa->header.length = sizeof(*spa);
560 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
561 spa->range_index = 5+1;
562 spa->address = t->dcr_dma[3];
563 spa->length = DCR_SIZE;
564
565 /* spa6 (bdw for dcr0) dimm0 */
566 spa = nfit_buf + sizeof(*spa) * 6;
567 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
568 spa->header.length = sizeof(*spa);
569 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
570 spa->range_index = 6+1;
571 spa->address = t->dimm_dma[0];
572 spa->length = DIMM_SIZE;
573
574 /* spa7 (bdw for dcr1) dimm1 */
575 spa = nfit_buf + sizeof(*spa) * 7;
576 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
577 spa->header.length = sizeof(*spa);
578 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
579 spa->range_index = 7+1;
580 spa->address = t->dimm_dma[1];
581 spa->length = DIMM_SIZE;
582
583 /* spa8 (bdw for dcr2) dimm2 */
584 spa = nfit_buf + sizeof(*spa) * 8;
585 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
586 spa->header.length = sizeof(*spa);
587 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
588 spa->range_index = 8+1;
589 spa->address = t->dimm_dma[2];
590 spa->length = DIMM_SIZE;
591
592 /* spa9 (bdw for dcr3) dimm3 */
593 spa = nfit_buf + sizeof(*spa) * 9;
594 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
595 spa->header.length = sizeof(*spa);
596 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
597 spa->range_index = 9+1;
598 spa->address = t->dimm_dma[3];
599 spa->length = DIMM_SIZE;
600
601 offset = sizeof(*spa) * 10;
602 /* mem-region0 (spa0, dimm0) */
603 memdev = nfit_buf + offset;
604 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
605 memdev->header.length = sizeof(*memdev);
606 memdev->device_handle = handle[0];
607 memdev->physical_id = 0;
608 memdev->region_id = 0;
609 memdev->range_index = 0+1;
610 memdev->region_index = 0+1;
611 memdev->region_size = SPA0_SIZE/2;
612 memdev->region_offset = t->spa_set_dma[0];
613 memdev->address = 0;
614 memdev->interleave_index = 0;
615 memdev->interleave_ways = 2;
616
617 /* mem-region1 (spa0, dimm1) */
618 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
619 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
620 memdev->header.length = sizeof(*memdev);
621 memdev->device_handle = handle[1];
622 memdev->physical_id = 1;
623 memdev->region_id = 0;
624 memdev->range_index = 0+1;
625 memdev->region_index = 1+1;
626 memdev->region_size = SPA0_SIZE/2;
627 memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
628 memdev->address = 0;
629 memdev->interleave_index = 0;
630 memdev->interleave_ways = 2;
631
632 /* mem-region2 (spa1, dimm0) */
633 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
634 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
635 memdev->header.length = sizeof(*memdev);
636 memdev->device_handle = handle[0];
637 memdev->physical_id = 0;
638 memdev->region_id = 1;
639 memdev->range_index = 1+1;
640 memdev->region_index = 0+1;
641 memdev->region_size = SPA1_SIZE/4;
642 memdev->region_offset = t->spa_set_dma[1];
643 memdev->address = SPA0_SIZE/2;
644 memdev->interleave_index = 0;
645 memdev->interleave_ways = 4;
646
647 /* mem-region3 (spa1, dimm1) */
648 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
649 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
650 memdev->header.length = sizeof(*memdev);
651 memdev->device_handle = handle[1];
652 memdev->physical_id = 1;
653 memdev->region_id = 1;
654 memdev->range_index = 1+1;
655 memdev->region_index = 1+1;
656 memdev->region_size = SPA1_SIZE/4;
657 memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
658 memdev->address = SPA0_SIZE/2;
659 memdev->interleave_index = 0;
660 memdev->interleave_ways = 4;
661
662 /* mem-region4 (spa1, dimm2) */
663 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
664 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
665 memdev->header.length = sizeof(*memdev);
666 memdev->device_handle = handle[2];
667 memdev->physical_id = 2;
668 memdev->region_id = 0;
669 memdev->range_index = 1+1;
670 memdev->region_index = 2+1;
671 memdev->region_size = SPA1_SIZE/4;
672 memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
673 memdev->address = SPA0_SIZE/2;
674 memdev->interleave_index = 0;
675 memdev->interleave_ways = 4;
676
677 /* mem-region5 (spa1, dimm3) */
678 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
679 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
680 memdev->header.length = sizeof(*memdev);
681 memdev->device_handle = handle[3];
682 memdev->physical_id = 3;
683 memdev->region_id = 0;
684 memdev->range_index = 1+1;
685 memdev->region_index = 3+1;
686 memdev->region_size = SPA1_SIZE/4;
687 memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
688 memdev->address = SPA0_SIZE/2;
689 memdev->interleave_index = 0;
690 memdev->interleave_ways = 4;
691
692 /* mem-region6 (spa/dcr0, dimm0) */
693 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
694 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
695 memdev->header.length = sizeof(*memdev);
696 memdev->device_handle = handle[0];
697 memdev->physical_id = 0;
698 memdev->region_id = 0;
699 memdev->range_index = 2+1;
700 memdev->region_index = 0+1;
701 memdev->region_size = 0;
702 memdev->region_offset = 0;
703 memdev->address = 0;
704 memdev->interleave_index = 0;
705 memdev->interleave_ways = 1;
706
707 /* mem-region7 (spa/dcr1, dimm1) */
708 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
709 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
710 memdev->header.length = sizeof(*memdev);
711 memdev->device_handle = handle[1];
712 memdev->physical_id = 1;
713 memdev->region_id = 0;
714 memdev->range_index = 3+1;
715 memdev->region_index = 1+1;
716 memdev->region_size = 0;
717 memdev->region_offset = 0;
718 memdev->address = 0;
719 memdev->interleave_index = 0;
720 memdev->interleave_ways = 1;
721
722 /* mem-region8 (spa/dcr2, dimm2) */
723 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
724 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
725 memdev->header.length = sizeof(*memdev);
726 memdev->device_handle = handle[2];
727 memdev->physical_id = 2;
728 memdev->region_id = 0;
729 memdev->range_index = 4+1;
730 memdev->region_index = 2+1;
731 memdev->region_size = 0;
732 memdev->region_offset = 0;
733 memdev->address = 0;
734 memdev->interleave_index = 0;
735 memdev->interleave_ways = 1;
736
737 /* mem-region9 (spa/dcr3, dimm3) */
738 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
739 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
740 memdev->header.length = sizeof(*memdev);
741 memdev->device_handle = handle[3];
742 memdev->physical_id = 3;
743 memdev->region_id = 0;
744 memdev->range_index = 5+1;
745 memdev->region_index = 3+1;
746 memdev->region_size = 0;
747 memdev->region_offset = 0;
748 memdev->address = 0;
749 memdev->interleave_index = 0;
750 memdev->interleave_ways = 1;
751
752 /* mem-region10 (spa/bdw0, dimm0) */
753 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
754 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
755 memdev->header.length = sizeof(*memdev);
756 memdev->device_handle = handle[0];
757 memdev->physical_id = 0;
758 memdev->region_id = 0;
759 memdev->range_index = 6+1;
760 memdev->region_index = 0+1;
761 memdev->region_size = 0;
762 memdev->region_offset = 0;
763 memdev->address = 0;
764 memdev->interleave_index = 0;
765 memdev->interleave_ways = 1;
766
767 /* mem-region11 (spa/bdw1, dimm1) */
768 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
769 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
770 memdev->header.length = sizeof(*memdev);
771 memdev->device_handle = handle[1];
772 memdev->physical_id = 1;
773 memdev->region_id = 0;
774 memdev->range_index = 7+1;
775 memdev->region_index = 1+1;
776 memdev->region_size = 0;
777 memdev->region_offset = 0;
778 memdev->address = 0;
779 memdev->interleave_index = 0;
780 memdev->interleave_ways = 1;
781
782 /* mem-region12 (spa/bdw2, dimm2) */
783 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
784 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
785 memdev->header.length = sizeof(*memdev);
786 memdev->device_handle = handle[2];
787 memdev->physical_id = 2;
788 memdev->region_id = 0;
789 memdev->range_index = 8+1;
790 memdev->region_index = 2+1;
791 memdev->region_size = 0;
792 memdev->region_offset = 0;
793 memdev->address = 0;
794 memdev->interleave_index = 0;
795 memdev->interleave_ways = 1;
796
797 /* mem-region13 (spa/bdw3, dimm3) */
798 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
799 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
800 memdev->header.length = sizeof(*memdev);
801 memdev->device_handle = handle[3];
802 memdev->physical_id = 3;
803 memdev->region_id = 0;
804 memdev->range_index = 9+1;
805 memdev->region_index = 3+1;
806 memdev->region_size = 0;
807 memdev->region_offset = 0;
808 memdev->address = 0;
809 memdev->interleave_index = 0;
810 memdev->interleave_ways = 1;
811
812 offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
813 /* dcr-descriptor0 */
814 dcr = nfit_buf + offset;
815 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
816 dcr->header.length = sizeof(struct acpi_nfit_control_region);
817 dcr->region_index = 0+1;
818 dcr->vendor_id = 0xabcd;
819 dcr->device_id = 0;
820 dcr->revision_id = 1;
821 dcr->serial_number = ~handle[0];
822 dcr->windows = 1;
823 dcr->window_size = DCR_SIZE;
824 dcr->command_offset = 0;
825 dcr->command_size = 8;
826 dcr->status_offset = 8;
827 dcr->status_size = 4;
828
829 /* dcr-descriptor1 */
830 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
831 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
832 dcr->header.length = sizeof(struct acpi_nfit_control_region);
833 dcr->region_index = 1+1;
834 dcr->vendor_id = 0xabcd;
835 dcr->device_id = 0;
836 dcr->revision_id = 1;
837 dcr->serial_number = ~handle[1];
838 dcr->windows = 1;
839 dcr->window_size = DCR_SIZE;
840 dcr->command_offset = 0;
841 dcr->command_size = 8;
842 dcr->status_offset = 8;
843 dcr->status_size = 4;
844
845 /* dcr-descriptor2 */
846 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
847 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
848 dcr->header.length = sizeof(struct acpi_nfit_control_region);
849 dcr->region_index = 2+1;
850 dcr->vendor_id = 0xabcd;
851 dcr->device_id = 0;
852 dcr->revision_id = 1;
853 dcr->serial_number = ~handle[2];
854 dcr->windows = 1;
855 dcr->window_size = DCR_SIZE;
856 dcr->command_offset = 0;
857 dcr->command_size = 8;
858 dcr->status_offset = 8;
859 dcr->status_size = 4;
860
861 /* dcr-descriptor3 */
862 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
863 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
864 dcr->header.length = sizeof(struct acpi_nfit_control_region);
865 dcr->region_index = 3+1;
866 dcr->vendor_id = 0xabcd;
867 dcr->device_id = 0;
868 dcr->revision_id = 1;
869 dcr->serial_number = ~handle[3];
870 dcr->windows = 1;
871 dcr->window_size = DCR_SIZE;
872 dcr->command_offset = 0;
873 dcr->command_size = 8;
874 dcr->status_offset = 8;
875 dcr->status_size = 4;
876
877 offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
878 /* bdw0 (spa/dcr0, dimm0) */
879 bdw = nfit_buf + offset;
880 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
881 bdw->header.length = sizeof(struct acpi_nfit_data_region);
882 bdw->region_index = 0+1;
883 bdw->windows = 1;
884 bdw->offset = 0;
885 bdw->size = BDW_SIZE;
886 bdw->capacity = DIMM_SIZE;
887 bdw->start_address = 0;
888
889 /* bdw1 (spa/dcr1, dimm1) */
890 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
891 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
892 bdw->header.length = sizeof(struct acpi_nfit_data_region);
893 bdw->region_index = 1+1;
894 bdw->windows = 1;
895 bdw->offset = 0;
896 bdw->size = BDW_SIZE;
897 bdw->capacity = DIMM_SIZE;
898 bdw->start_address = 0;
899
900 /* bdw2 (spa/dcr2, dimm2) */
901 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
902 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
903 bdw->header.length = sizeof(struct acpi_nfit_data_region);
904 bdw->region_index = 2+1;
905 bdw->windows = 1;
906 bdw->offset = 0;
907 bdw->size = BDW_SIZE;
908 bdw->capacity = DIMM_SIZE;
909 bdw->start_address = 0;
910
911 /* bdw3 (spa/dcr3, dimm3) */
912 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
913 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
914 bdw->header.length = sizeof(struct acpi_nfit_data_region);
915 bdw->region_index = 3+1;
916 bdw->windows = 1;
917 bdw->offset = 0;
918 bdw->size = BDW_SIZE;
919 bdw->capacity = DIMM_SIZE;
920 bdw->start_address = 0;
921
922 offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
923 /* flush0 (dimm0) */
924 flush = nfit_buf + offset;
925 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
926 flush->header.length = sizeof(struct acpi_nfit_flush_address);
927 flush->device_handle = handle[0];
928 flush->hint_count = 1;
929 flush->hint_address[0] = t->flush_dma[0];
930
931 /* flush1 (dimm1) */
932 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
933 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
934 flush->header.length = sizeof(struct acpi_nfit_flush_address);
935 flush->device_handle = handle[1];
936 flush->hint_count = 1;
937 flush->hint_address[0] = t->flush_dma[1];
938
939 /* flush2 (dimm2) */
940 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
941 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
942 flush->header.length = sizeof(struct acpi_nfit_flush_address);
943 flush->device_handle = handle[2];
944 flush->hint_count = 1;
945 flush->hint_address[0] = t->flush_dma[2];
946
947 /* flush3 (dimm3) */
948 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
949 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
950 flush->header.length = sizeof(struct acpi_nfit_flush_address);
951 flush->device_handle = handle[3];
952 flush->hint_count = 1;
953 flush->hint_address[0] = t->flush_dma[3];
954
955 if (t->setup_hotplug) {
956 offset = offset + sizeof(struct acpi_nfit_flush_address) * 4;
957 /* dcr-descriptor4 */
958 dcr = nfit_buf + offset;
959 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
960 dcr->header.length = sizeof(struct acpi_nfit_control_region);
961 dcr->region_index = 4+1;
962 dcr->vendor_id = 0xabcd;
963 dcr->device_id = 0;
964 dcr->revision_id = 1;
965 dcr->serial_number = ~handle[4];
966 dcr->windows = 1;
967 dcr->window_size = DCR_SIZE;
968 dcr->command_offset = 0;
969 dcr->command_size = 8;
970 dcr->status_offset = 8;
971 dcr->status_size = 4;
972
973 offset = offset + sizeof(struct acpi_nfit_control_region);
974 /* bdw4 (spa/dcr4, dimm4) */
975 bdw = nfit_buf + offset;
976 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
977 bdw->header.length = sizeof(struct acpi_nfit_data_region);
978 bdw->region_index = 4+1;
979 bdw->windows = 1;
980 bdw->offset = 0;
981 bdw->size = BDW_SIZE;
982 bdw->capacity = DIMM_SIZE;
983 bdw->start_address = 0;
984
985 offset = offset + sizeof(struct acpi_nfit_data_region);
986 /* spa10 (dcr4) dimm4 */
987 spa = nfit_buf + offset;
988 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
989 spa->header.length = sizeof(*spa);
990 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
991 spa->range_index = 10+1;
992 spa->address = t->dcr_dma[4];
993 spa->length = DCR_SIZE;
994
995 /*
996 * spa11 (single-dimm interleave for hotplug, note storage
997 * does not actually alias the related block-data-window
998 * regions)
999 */
1000 spa = nfit_buf + offset + sizeof(*spa);
1001 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1002 spa->header.length = sizeof(*spa);
1003 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1004 spa->range_index = 11+1;
1005 spa->address = t->spa_set_dma[2];
1006 spa->length = SPA0_SIZE;
1007
1008 /* spa12 (bdw for dcr4) dimm4 */
1009 spa = nfit_buf + offset + sizeof(*spa) * 2;
1010 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1011 spa->header.length = sizeof(*spa);
1012 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1013 spa->range_index = 12+1;
1014 spa->address = t->dimm_dma[4];
1015 spa->length = DIMM_SIZE;
1016
1017 offset = offset + sizeof(*spa) * 3;
1018 /* mem-region14 (spa/dcr4, dimm4) */
1019 memdev = nfit_buf + offset;
1020 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1021 memdev->header.length = sizeof(*memdev);
1022 memdev->device_handle = handle[4];
1023 memdev->physical_id = 4;
1024 memdev->region_id = 0;
1025 memdev->range_index = 10+1;
1026 memdev->region_index = 4+1;
1027 memdev->region_size = 0;
1028 memdev->region_offset = 0;
1029 memdev->address = 0;
1030 memdev->interleave_index = 0;
1031 memdev->interleave_ways = 1;
1032
1033 /* mem-region15 (spa11, dimm4) */
1034 memdev = nfit_buf + offset +
1035 sizeof(struct acpi_nfit_memory_map);
1036 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1037 memdev->header.length = sizeof(*memdev);
1038 memdev->device_handle = handle[4];
1039 memdev->physical_id = 4;
1040 memdev->region_id = 0;
1041 memdev->range_index = 11+1;
1042 memdev->region_index = 4+1;
1043 memdev->region_size = SPA0_SIZE;
1044 memdev->region_offset = t->spa_set_dma[2];
1045 memdev->address = 0;
1046 memdev->interleave_index = 0;
1047 memdev->interleave_ways = 1;
1048
1049 /* mem-region16 (spa/bdw4, dimm4) */
1050 memdev = nfit_buf + offset +
1051 sizeof(struct acpi_nfit_memory_map) * 2;
1052 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1053 memdev->header.length = sizeof(*memdev);
1054 memdev->device_handle = handle[4];
1055 memdev->physical_id = 4;
1056 memdev->region_id = 0;
1057 memdev->range_index = 12+1;
1058 memdev->region_index = 4+1;
1059 memdev->region_size = 0;
1060 memdev->region_offset = 0;
1061 memdev->address = 0;
1062 memdev->interleave_index = 0;
1063 memdev->interleave_ways = 1;
1064
1065 offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
1066 /* flush4 (dimm4) */
1067 flush = nfit_buf + offset;
1068 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1069 flush->header.length = sizeof(struct acpi_nfit_flush_address);
1070 flush->device_handle = handle[4];
1071 flush->hint_count = 1;
1072 flush->hint_address[0] = t->flush_dma[4];
1073 }
1074
1075 acpi_desc = &t->acpi_desc;
1076 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
1077 set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
1078 set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
1079 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
1080 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
1081 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
1082 nd_desc = &acpi_desc->nd_desc;
1083 nd_desc->ndctl = nfit_test_ctl;
1084 }
1085
1086 static void nfit_test1_setup(struct nfit_test *t)
1087 {
1088 size_t offset;
1089 void *nfit_buf = t->nfit_buf;
1090 struct acpi_nfit_memory_map *memdev;
1091 struct acpi_nfit_control_region *dcr;
1092 struct acpi_nfit_system_address *spa;
1093 struct nvdimm_bus_descriptor *nd_desc;
1094 struct acpi_nfit_desc *acpi_desc;
1095
1096 offset = 0;
1097 /* spa0 (flat range with no bdw aliasing) */
1098 spa = nfit_buf + offset;
1099 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1100 spa->header.length = sizeof(*spa);
1101 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1102 spa->range_index = 0+1;
1103 spa->address = t->spa_set_dma[0];
1104 spa->length = SPA2_SIZE;
1105
1106 offset += sizeof(*spa);
1107 /* mem-region0 (spa0, dimm0) */
1108 memdev = nfit_buf + offset;
1109 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1110 memdev->header.length = sizeof(*memdev);
1111 memdev->device_handle = 0;
1112 memdev->physical_id = 0;
1113 memdev->region_id = 0;
1114 memdev->range_index = 0+1;
1115 memdev->region_index = 0+1;
1116 memdev->region_size = SPA2_SIZE;
1117 memdev->region_offset = 0;
1118 memdev->address = 0;
1119 memdev->interleave_index = 0;
1120 memdev->interleave_ways = 1;
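/* mark the dimm with every failure/health flag to exercise flag reporting */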
1121 memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
1122 | ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
1123 | ACPI_NFIT_MEM_NOT_ARMED;
1124
1125 offset += sizeof(*memdev);
1126 /* dcr-descriptor0 */
1127 dcr = nfit_buf + offset;
1128 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1129 dcr->header.length = sizeof(struct acpi_nfit_control_region);
1130 dcr->region_index = 0+1;
1131 dcr->vendor_id = 0xabcd;
1132 dcr->device_id = 0;
1133 dcr->revision_id = 1;
1134 dcr->serial_number = ~0;
1135 dcr->code = 0x201;
1136 dcr->windows = 0;
1137 dcr->window_size = 0;
1138 dcr->command_offset = 0;
1139 dcr->command_size = 0;
1140 dcr->status_offset = 0;
1141 dcr->status_size = 0;
1142
1143 acpi_desc = &t->acpi_desc;
1144 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
1145 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
1146 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
1147 nd_desc = &acpi_desc->nd_desc;
1148 nd_desc->ndctl = nfit_test_ctl;
1149 }
1150
1151 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
1152 void *iobuf, u64 len, int rw)
1153 {
1154 struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
1155 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1156 struct nd_region *nd_region = &ndbr->nd_region;
1157 unsigned int lane;
1158
1159 lane = nd_region_acquire_lane(nd_region);
1160 if (rw)
1161 memcpy(mmio->addr.base + dpa, iobuf, len);
1162 else {
1163 memcpy(iobuf, mmio->addr.base + dpa, len);
1164
1165 /* give us some coverage of the mmio_flush_range() API */
1166 mmio_flush_range(mmio->addr.base + dpa, len);
1167 }
1168 nd_region_release_lane(nd_region, lane);
1169
1170 return 0;
1171 }
1172
1173 static int nfit_test_probe(struct platform_device *pdev)
1174 {
1175 struct nvdimm_bus_descriptor *nd_desc;
1176 struct acpi_nfit_desc *acpi_desc;
1177 struct device *dev = &pdev->dev;
1178 struct nfit_test *nfit_test;
1179 int rc;
1180
1181 nfit_test = to_nfit_test(&pdev->dev);
1182
1183 /* common alloc */
1184 if (nfit_test->num_dcr) {
1185 int num = nfit_test->num_dcr;
1186
1187 nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
1188 GFP_KERNEL);
1189 nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1190 GFP_KERNEL);
1191 nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
1192 GFP_KERNEL);
1193 nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1194 GFP_KERNEL);
1195 nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
1196 GFP_KERNEL);
1197 nfit_test->label_dma = devm_kcalloc(dev, num,
1198 sizeof(dma_addr_t), GFP_KERNEL);
1199 nfit_test->dcr = devm_kcalloc(dev, num,
1200 sizeof(struct nfit_test_dcr *), GFP_KERNEL);
1201 nfit_test->dcr_dma = devm_kcalloc(dev, num,
1202 sizeof(dma_addr_t), GFP_KERNEL);
1203 if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
1204 && nfit_test->label_dma && nfit_test->dcr
1205 && nfit_test->dcr_dma && nfit_test->flush
1206 && nfit_test->flush_dma)
1207 /* pass */;
1208 else
1209 return -ENOMEM;
1210 }
1211
1212 if (nfit_test->num_pm) {
1213 int num = nfit_test->num_pm;
1214
1215 nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
1216 GFP_KERNEL);
1217 nfit_test->spa_set_dma = devm_kcalloc(dev, num,
1218 sizeof(dma_addr_t), GFP_KERNEL);
1219 if (nfit_test->spa_set && nfit_test->spa_set_dma)
1220 /* pass */;
1221 else
1222 return -ENOMEM;
1223 }
1224
1225 /* per-nfit specific alloc */
1226 if (nfit_test->alloc(nfit_test))
1227 return -ENOMEM;
1228
1229 nfit_test->setup(nfit_test);
1230 acpi_desc = &nfit_test->acpi_desc;
1231 acpi_desc->dev = &pdev->dev;
1232 acpi_desc->nfit = nfit_test->nfit_buf;
1233 acpi_desc->blk_do_io = nfit_test_blk_do_io;
1234 nd_desc = &acpi_desc->nd_desc;
1235 nd_desc->attr_groups = acpi_nfit_attribute_groups;
1236 acpi_desc->nvdimm_bus = nvdimm_bus_register(&pdev->dev, nd_desc);
1237 if (!acpi_desc->nvdimm_bus)
1238 return -ENXIO;
1239
1240 INIT_LIST_HEAD(&acpi_desc->spa_maps);
1241 INIT_LIST_HEAD(&acpi_desc->spas);
1242 INIT_LIST_HEAD(&acpi_desc->dcrs);
1243 INIT_LIST_HEAD(&acpi_desc->bdws);
1244 INIT_LIST_HEAD(&acpi_desc->idts);
1245 INIT_LIST_HEAD(&acpi_desc->flushes);
1246 INIT_LIST_HEAD(&acpi_desc->memdevs);
1247 INIT_LIST_HEAD(&acpi_desc->dimms);
1248 mutex_init(&acpi_desc->spa_map_mutex);
1249 mutex_init(&acpi_desc->init_mutex);
1250
1251 rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
1252 if (rc) {
1253 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
1254 return rc;
1255 }
1256
1257 if (nfit_test->setup != nfit_test0_setup)
1258 return 0;
1259
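/*
 * For the first bus, rebuild the NFIT with the hotplug DIMM entries
 * included and parse it again so the new DIMM is merged into the
 * already-registered bus.
 */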
1260 nfit_test->setup_hotplug = 1;
1261 nfit_test->setup(nfit_test);
1262
1263 rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
1264 if (rc) {
1265 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
1266 return rc;
1267 }
1268
1269 return 0;
1270 }
1271
1272 static int nfit_test_remove(struct platform_device *pdev)
1273 {
1274 struct nfit_test *nfit_test = to_nfit_test(&pdev->dev);
1275 struct acpi_nfit_desc *acpi_desc = &nfit_test->acpi_desc;
1276
1277 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
1278
1279 return 0;
1280 }
1281
1282 static void nfit_test_release(struct device *dev)
1283 {
1284 struct nfit_test *nfit_test = to_nfit_test(dev);
1285
1286 kfree(nfit_test);
1287 }
1288
1289 static const struct platform_device_id nfit_test_id[] = {
1290 { KBUILD_MODNAME },
1291 { },
1292 };
1293
1294 static struct platform_driver nfit_test_driver = {
1295 .probe = nfit_test_probe,
1296 .remove = nfit_test_remove,
1297 .driver = {
1298 .name = KBUILD_MODNAME,
1299 },
1300 .id_table = nfit_test_id,
1301 };
1302
1303 #ifdef CONFIG_CMA_SIZE_MBYTES
1304 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
1305 #else
1306 #define CMA_SIZE_MBYTES 0
1307 #endif
1308
1309 static __init int nfit_test_init(void)
1310 {
1311 int rc, i;
1312
1313 nfit_test_setup(nfit_test_lookup);
1314
1315 for (i = 0; i < NUM_NFITS; i++) {
1316 struct nfit_test *nfit_test;
1317 struct platform_device *pdev;
1318 static int once;
1319
1320 nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
1321 if (!nfit_test) {
1322 rc = -ENOMEM;
1323 goto err_register;
1324 }
1325 INIT_LIST_HEAD(&nfit_test->resources);
1326 switch (i) {
1327 case 0:
1328 nfit_test->num_pm = NUM_PM;
1329 nfit_test->num_dcr = NUM_DCR;
1330 nfit_test->alloc = nfit_test0_alloc;
1331 nfit_test->setup = nfit_test0_setup;
1332 break;
1333 case 1:
1334 nfit_test->num_pm = 1;
1335 nfit_test->alloc = nfit_test1_alloc;
1336 nfit_test->setup = nfit_test1_setup;
1337 break;
1338 default:
1339 rc = -EINVAL;
1340 goto err_register;
1341 }
1342 pdev = &nfit_test->pdev;
1343 pdev->name = KBUILD_MODNAME;
1344 pdev->id = i;
1345 pdev->dev.release = nfit_test_release;
1346 rc = platform_device_register(pdev);
1347 if (rc) {
1348 put_device(&pdev->dev);
1349 goto err_register;
1350 }
1351
1352 rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1353 if (rc)
1354 goto err_register;
1355
1356 instances[i] = nfit_test;
1357
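/*
 * One-time check that enough contiguous (CMA) memory is available for
 * the coherent allocations the test makes; the probe buffer is freed
 * immediately.
 */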
1358 if (!once++) {
1359 dma_addr_t dma;
1360 void *buf;
1361
1362 buf = dma_alloc_coherent(&pdev->dev, SZ_128M, &dma,
1363 GFP_KERNEL);
1364 if (!buf) {
1365 rc = -ENOMEM;
1366 dev_warn(&pdev->dev, "need 128M of free cma\n");
1367 goto err_register;
1368 }
1369 dma_free_coherent(&pdev->dev, SZ_128M, buf, dma);
1370 }
1371 }
1372
1373 rc = platform_driver_register(&nfit_test_driver);
1374 if (rc)
1375 goto err_register;
1376 return 0;
1377
1378 err_register:
1379 for (i = 0; i < NUM_NFITS; i++)
1380 if (instances[i])
1381 platform_device_unregister(&instances[i]->pdev);
1382 nfit_test_teardown();
1383 return rc;
1384 }
1385
1386 static __exit void nfit_test_exit(void)
1387 {
1388 int i;
1389
1390 platform_driver_unregister(&nfit_test_driver);
1391 for (i = 0; i < NUM_NFITS; i++)
1392 platform_device_unregister(&instances[i]->pdev);
1393 nfit_test_teardown();
1394 }
1395
1396 module_init(nfit_test_init);
1397 module_exit(nfit_test_exit);
1398 MODULE_LICENSE("GPL v2");
1399 MODULE_AUTHOR("Intel Corporation");