/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

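/*
 * The _DSM status word packs a command status in the low 16 bits and
 * extended status in the upper 16 bits; the 0xffff masks and '>> 16'
 * shifts in the translation helpers below pick those fields apart.
 */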
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop. If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
					& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_nvdimm_status(void *buf, unsigned int cmd, u32 status)
{
	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(buf, cmd, status);
}

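/*
 * Marshal a libnvdimm command into a _DSM call: build the ACPI input
 * buffer from the validated command envelope, evaluate the _DSM, copy
 * the output payload back into 'buf', and translate the firmware
 * status word into an errno for the caller via xlat_status().
 */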
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const guid_t *guid;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		guid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
			__func__, dimm_name, cmd, func, in_buf.buffer.length);
	print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
			in_buf.buffer.pointer,
			min_t(u32, 256, in_buf.buffer.length), true);

	out_obj = acpi_evaluate_dsm(handle, guid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
			|| (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",

	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

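/*
 * Each add_* helper below follows the same pattern: if an identical
 * table was seen in a previous enumeration it is moved from the 'prev'
 * list back onto the active list, otherwise a new entry is allocated
 * and the table contents are copied in.
 */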
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

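/*
 * Dispatch one NFIT sub-table to its add_* handler and return a
 * pointer to the next table, NULL at the end of the buffer, or an
 * ERR_PTR on allocation failure.
 */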
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm. For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
					flush->hint_count
					* sizeof(struct resource), GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

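/* Keep the dimm list sorted by NFIT device handle. */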
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s). From each MEMDEV find the
	 * corresponding DCR. Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR. Throw it all into an nfit_mem object. Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");
	}
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		if (rc != ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
		flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
		flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev)) {
		/* Without a dcr only the memdev attributes can be surfaced */
		if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
				|| a == &dev_attr_flags.attr
				|| a == &dev_attr_family.attr
				|| a == &dev_attr_dsm_mask.attr)
			return a->mode;
		return 0;
	}

	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const guid_t *guid;
	int i;
	int family = -1;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
			ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets. Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this GUID.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			if (family < 0 || i == default_dsm_family)
				family = i;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = family;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = 0x3fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	guid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm)
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		struct nfit_memdev *nfit_memdev;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			set_bit(NDD_ALIASING, &flags);

		/* collate flags across all memdevs for this dimm */
		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			struct acpi_nfit_memory_map *dimm_memdev;

			dimm_memdev = __to_nfit_memdev(nfit_mem);
			if (dimm_memdev->device_handle
					!= nfit_memdev->memdev->device_handle)
				continue;
			dimm_memdev->flags |= nfit_memdev->memdev->flags;
		}

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			set_bit(NDD_UNARMED, &flags);

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
				nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
			mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");

	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

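/*
 * The interleave-set cookie is a fletcher64 checksum over the
 * (region-offset, serial-number) tuples sorted by region offset, so
 * the same collection of DIMMs yields the same cookie regardless of
 * enumeration order.
 */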
static int cmp_map_compat(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

1706static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
1707 struct nd_region_desc *ndr_desc,
1708 struct acpi_nfit_system_address *spa)
1709{
1710 int i, spa_type = nfit_spa_type(spa);
1711 struct device *dev = acpi_desc->dev;
1712 struct nd_interleave_set *nd_set;
1713 u16 nr = ndr_desc->num_mappings;
1714 struct nfit_set_info *info;
1715
1716 if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
1717 /* pass */;
1718 else
1719 return 0;
1720
1721 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
1722 if (!nd_set)
1723 return -ENOMEM;
1724
1725 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
1726 if (!info)
1727 return -ENOMEM;
1728 for (i = 0; i < nr; i++) {
44c462eb 1729 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
eaf96153 1730 struct nfit_set_info_map *map = &info->mapping[i];
44c462eb 1731 struct nvdimm *nvdimm = mapping->nvdimm;
1732 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1733 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
1734 spa->range_index, i);
1735
1736 if (!memdev || !nfit_mem->dcr) {
1737 dev_err(dev, "%s: failed to find DCR\n", __func__);
1738 return -ENODEV;
1739 }
1740
1741 map->region_offset = memdev->region_offset;
1742 map->serial_number = nfit_mem->dcr->serial_number;
1743 }
1744
1745 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
1746 cmp_map, NULL);
1747 nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
1748
1749 /* support namespaces created with the wrong sort order */
1750 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
1751 cmp_map_compat, NULL);
1752 nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
1753
1754 ndr_desc->nd_set = nd_set;
1755 devm_kfree(dev, info);
1756
1757 return 0;
1758}
1759
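/*
 * Sketch of the cookie input (hypothetical values): for a 2-way set
 * the fletcher64 sum is taken over the sorted array
 *
 *	{ .region_offset = 0x0, .serial_number = 0x1234, .pad = 0 }
 *	{ .region_offset = 0x8000000, .serial_number = 0x5678, .pad = 0 }
 *
 * so the same set of DIMMs yields the same cookie regardless of the
 * order their memdev entries appear in the NFIT.
 */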
1760static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
1761{
1762 struct acpi_nfit_interleave *idt = mmio->idt;
1763 u32 sub_line_offset, line_index, line_offset;
1764 u64 line_no, table_skip_count, table_offset;
1765
1766 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
1767 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
1768 line_offset = idt->line_offset[line_index]
1769 * mmio->line_size;
1770 table_offset = table_skip_count * mmio->table_size;
1771
1772 return mmio->base_offset + line_offset + table_offset + sub_line_offset;
1773}
1774
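/*
 * Worked example with hypothetical geometry: line_size = 256,
 * num_lines = 2, offset = 5000. Then line_no = 19 and
 * sub_line_offset = 136, line_index = 1, table_skip_count = 9, and
 * the result is base_offset + idt->line_offset[1] * 256
 * + 9 * table_size + 136.
 */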
de4a196c 1775static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
1776{
1777 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
1778 u64 offset = nfit_blk->stat_offset + mmio->size * bw;
68202c9f 1779 const u32 STATUS_MASK = 0x80000037;
1780
1781 if (mmio->num_lines)
1782 offset = to_interleave_offset(offset, mmio);
1783
68202c9f 1784 return readl(mmio->addr.base + offset) & STATUS_MASK;
1785}
1786
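/*
 * STATUS_MASK keeps bit 31 plus the low bits 0x37 of the DCR status
 * register and discards the rest; the meaning of the retained bits
 * (pending/error conditions) is defined by the NVDIMM _DSM
 * interface, not by this driver.
 */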
1787static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
1788 resource_size_t dpa, unsigned int len, unsigned int write)
1789{
1790 u64 cmd, offset;
1791 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
1792
1793 enum {
1794 BCW_OFFSET_MASK = (1ULL << 48)-1,
1795 BCW_LEN_SHIFT = 48,
1796 BCW_LEN_MASK = (1ULL << 8) - 1,
1797 BCW_CMD_SHIFT = 56,
1798 };
1799
1800 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
1801 len = len >> L1_CACHE_SHIFT;
1802 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
1803 cmd |= ((u64) write) << BCW_CMD_SHIFT;
1804
1805 offset = nfit_blk->cmd_offset + mmio->size * bw;
1806 if (mmio->num_lines)
1807 offset = to_interleave_offset(offset, mmio);
1808
67a3e8fe 1809 writeq(cmd, mmio->addr.base + offset);
f284a4f2 1810 nvdimm_flush(nfit_blk->nd_region);
f0f2c072 1811
aef25338 1812 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
67a3e8fe 1813 readq(mmio->addr.base + offset);
1814}
1815
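/*
 * Per the enum above the command word packs the cache-line-aligned
 * DPA in bits 0..47, the length in cache lines in bits 48..55, and
 * the write flag in bit 56. For example dpa = 0x2000, len = 128,
 * write = 1 encodes as 0x0102000000000080.
 */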
1816static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
1817 resource_size_t dpa, void *iobuf, size_t len, int rw,
1818 unsigned int lane)
1819{
1820 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1821 unsigned int copied = 0;
1822 u64 base_offset;
1823 int rc;
1824
1825 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
1826 + lane * mmio->size;
1827 write_blk_ctl(nfit_blk, lane, dpa, len, rw);
1828 while (len) {
1829 unsigned int c;
1830 u64 offset;
1831
1832 if (mmio->num_lines) {
1833 u32 line_offset;
1834
1835 offset = to_interleave_offset(base_offset + copied,
1836 mmio);
1837 div_u64_rem(offset, mmio->line_size, &line_offset);
1838 c = min_t(size_t, len, mmio->line_size - line_offset);
1839 } else {
1840 offset = base_offset + nfit_blk->bdw_offset;
1841 c = len;
1842 }
1843
1844 if (rw)
67a3e8fe 1845 memcpy_to_pmem(mmio->addr.aperture + offset,
c2ad2954 1846 iobuf + copied, c);
67a3e8fe 1847 else {
aef25338 1848 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
1849 mmio_flush_range((void __force *)
1850 mmio->addr.aperture + offset, c);
1851
6abccd1b 1852 memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
67a3e8fe 1853 }
1854
1855 copied += c;
1856 len -= c;
1857 }
1858
1859 if (rw)
f284a4f2 1860 nvdimm_flush(nfit_blk->nd_region);
c2ad2954 1861
1862 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
1863 return rc;
1864}
1865
1866static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
1867 resource_size_t dpa, void *iobuf, u64 len, int rw)
1868{
1869 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
1870 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1871 struct nd_region *nd_region = nfit_blk->nd_region;
1872 unsigned int lane, copied = 0;
1873 int rc = 0;
1874
1875 lane = nd_region_acquire_lane(nd_region);
1876 while (len) {
1877 u64 c = min(len, mmio->size);
1878
1879 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
1880 iobuf + copied, c, rw, lane);
1881 if (rc)
1882 break;
1883
1884 copied += c;
1885 len -= c;
1886 }
1887 nd_region_release_lane(nd_region, lane);
1888
1889 return rc;
1890}
1891
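/*
 * Note the chunking above: each acpi_nfit_blk_single_io() call is
 * capped at the aperture size (mmio->size), and the entire transfer
 * holds one lane so the command/status register pair is never shared
 * mid-I/O with another thread.
 */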
1892static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
1893 struct acpi_nfit_interleave *idt, u16 interleave_ways)
1894{
1895 if (idt) {
1896 mmio->num_lines = idt->line_count;
1897 mmio->line_size = idt->line_size;
1898 if (interleave_ways == 0)
1899 return -ENXIO;
1900 mmio->table_size = mmio->num_lines * interleave_ways
1901 * mmio->line_size;
1902 }
1903
1904 return 0;
1905}
1906
1907static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
1908 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
1909{
1910 struct nd_cmd_dimm_flags flags;
1911 int rc;
1912
1913 memset(&flags, 0, sizeof(flags));
1914 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
aef25338 1915 sizeof(flags), NULL);
1916
1917 if (rc >= 0 && flags.status == 0)
1918 nfit_blk->dimm_flags = flags.flags;
1919 else if (rc == -ENOTTY) {
1920 /* fall back to a conservative default */
aef25338 1921 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
1922 rc = 0;
1923 } else
1924 rc = -ENXIO;
1925
1926 return rc;
1927}
1928
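/*
 * Falling back to NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH when the
 * DIMM does not implement ND_CMD_DIMM_FLAGS is the conservative
 * choice: the extra readq() and cache flushes only cost performance,
 * never correctness.
 */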
1929static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1930 struct device *dev)
1931{
1932 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1933 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
1934 struct nfit_blk_mmio *mmio;
1935 struct nfit_blk *nfit_blk;
1936 struct nfit_mem *nfit_mem;
1937 struct nvdimm *nvdimm;
1938 int rc;
1939
1940 nvdimm = nd_blk_region_to_dimm(ndbr);
1941 nfit_mem = nvdimm_provider_data(nvdimm);
1942 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
1943 dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
1944 nfit_mem ? "" : " nfit_mem",
1945 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
1946 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
1947 return -ENXIO;
1948 }
1949
1950 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
1951 if (!nfit_blk)
1952 return -ENOMEM;
1953 nd_blk_region_set_provider_data(ndbr, nfit_blk);
1954 nfit_blk->nd_region = to_nd_region(dev);
1955
1956 /* map block aperture memory */
1957 nfit_blk->bdw_offset = nfit_mem->bdw->offset;
1958 mmio = &nfit_blk->mmio[BDW];
1959 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
1960 nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM);
67a3e8fe 1961 if (!mmio->addr.base) {
1962 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
1963 nvdimm_name(nvdimm));
1964 return -ENOMEM;
1965 }
1966 mmio->size = nfit_mem->bdw->size;
1967 mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
1968 mmio->idt = nfit_mem->idt_bdw;
1969 mmio->spa = nfit_mem->spa_bdw;
1970 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
1971 nfit_mem->memdev_bdw->interleave_ways);
1972 if (rc) {
1973 dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
1974 __func__, nvdimm_name(nvdimm));
1975 return rc;
1976 }
1977
1978 /* map block control memory */
1979 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
1980 nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
1981 mmio = &nfit_blk->mmio[DCR];
1982 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
1983 nfit_mem->spa_dcr->length);
67a3e8fe 1984 if (!mmio->addr.base) {
1985 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
1986 nvdimm_name(nvdimm));
1987 return -ENOMEM;
1988 }
1989 mmio->size = nfit_mem->dcr->window_size;
1990 mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
1991 mmio->idt = nfit_mem->idt_dcr;
1992 mmio->spa = nfit_mem->spa_dcr;
1993 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
1994 nfit_mem->memdev_dcr->interleave_ways);
1995 if (rc) {
1996 dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
1997 __func__, nvdimm_name(nvdimm));
1998 return rc;
1999 }
2000
2001 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
2002 if (rc < 0) {
2003 dev_dbg(dev, "%s: %s failed to get DIMM flags\n",
2004 __func__, nvdimm_name(nvdimm));
2005 return rc;
2006 }
2007
f284a4f2 2008 if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
2009 dev_warn(dev, "unable to guarantee persistence of writes\n");
2010
2011 if (mmio->line_size == 0)
2012 return 0;
2013
2014 if ((u32) nfit_blk->cmd_offset % mmio->line_size
2015 + 8 > mmio->line_size) {
2016 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
2017 return -ENXIO;
2018 } else if ((u32) nfit_blk->stat_offset % mmio->line_size
2019 + 8 > mmio->line_size) {
2020 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
2021 return -ENXIO;
2022 }
2023
2024 return 0;
2025}
2026
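/*
 * The final checks reject layouts where the 8-byte command or status
 * register straddles an interleave line. With line_size = 256, for
 * example, a cmd_offset of 252 fails because 252 % 256 + 8 = 260 >
 * 256, i.e. the writeq() would span two discontiguous lines.
 */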
aef25338 2027static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
1cf03c00 2028 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
0caeef63 2029{
aef25338 2030 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1cf03c00 2031 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2032 int cmd_rc, rc;
2033
2034 cmd->address = spa->address;
2035 cmd->length = spa->length;
2036 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
2037 sizeof(*cmd), &cmd_rc);
2038 if (rc < 0)
2039 return rc;
1cf03c00 2040 return cmd_rc;
2041}
2042
1cf03c00 2043static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
2044{
2045 int rc;
2046 int cmd_rc;
2047 struct nd_cmd_ars_start ars_start;
2048 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2049 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
0caeef63 2050
2051 memset(&ars_start, 0, sizeof(ars_start));
2052 ars_start.address = spa->address;
2053 ars_start.length = spa->length;
2054 if (nfit_spa_type(spa) == NFIT_SPA_PM)
2055 ars_start.type = ND_ARS_PERSISTENT;
2056 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
2057 ars_start.type = ND_ARS_VOLATILE;
2058 else
2059 return -ENOTTY;
aef25338 2060
2061 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2062 sizeof(ars_start), &cmd_rc);
aef25338 2063
2064 if (rc < 0)
2065 return rc;
2066 return cmd_rc;
2067}
2068
1cf03c00 2069static int ars_continue(struct acpi_nfit_desc *acpi_desc)
0caeef63 2070{
aef25338 2071 int rc, cmd_rc;
2072 struct nd_cmd_ars_start ars_start;
2073 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2074 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2075
2076 memset(&ars_start, 0, sizeof(ars_start));
2077 ars_start.address = ars_status->restart_address;
2078 ars_start.length = ars_status->restart_length;
2079 ars_start.type = ars_status->type;
2080 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2081 sizeof(ars_start), &cmd_rc);
2082 if (rc < 0)
2083 return rc;
2084 return cmd_rc;
2085}
0caeef63 2086
2087static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2088{
2089 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2090 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2091 int rc, cmd_rc;
aef25338 2092
2093 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2094 acpi_desc->ars_status_size, &cmd_rc);
2095 if (rc < 0)
2096 return rc;
2097 return cmd_rc;
2098}
2099
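/*
 * Taken together these helpers drive the ARS state machine:
 * ars_get_cap() sizes the status buffer, ars_start() kicks off a
 * scrub of one SPA range, ars_get_status() polls for results
 * (-EBUSY while in flight, -ENOSPC on overflow), and ars_continue()
 * resumes from the reported restart_address.
 */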
82aa37cf 2100static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
1cf03c00 2101 struct nd_cmd_ars_status *ars_status)
0caeef63 2102{
82aa37cf 2103 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
2104 int rc;
2105 u32 i;
2106
2107 /*
2108 * First record starts at 44 byte offset from the start of the
2109 * payload.
2110 */
2111 if (ars_status->out_length < 44)
2112 return 0;
0caeef63 2113 for (i = 0; i < ars_status->num_records; i++) {
2114 /* only process full records */
2115 if (ars_status->out_length
2116 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2117 break;
2118 rc = nvdimm_bus_add_poison(nvdimm_bus,
2119 ars_status->records[i].err_address,
2120 ars_status->records[i].length);
2121 if (rc)
2122 return rc;
2123 }
2124 if (i < ars_status->num_records)
2125 dev_warn(acpi_desc->dev, "detected truncated ars results\n");
2126
2127 return 0;
2128}
2129
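/*
 * Sizing example for the check above, assuming the 24-byte
 * nd_ars_record of this interface: out_length = 92 covers the
 * 44-byte header plus two full records, so a num_records of three
 * would be flagged as truncated.
 */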
2130static void acpi_nfit_remove_resource(void *data)
2131{
2132 struct resource *res = data;
2133
2134 remove_resource(res);
2135}
2136
2137static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2138 struct nd_region_desc *ndr_desc)
2139{
2140 struct resource *res, *nd_res = ndr_desc->res;
2141 int is_pmem, ret;
2142
2143 /* No operation if the region is already registered as PMEM */
2144 is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2145 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2146 if (is_pmem == REGION_INTERSECTS)
2147 return 0;
2148
2149 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2150 if (!res)
2151 return -ENOMEM;
2152
2153 res->name = "Persistent Memory";
2154 res->start = nd_res->start;
2155 res->end = nd_res->end;
2156 res->flags = IORESOURCE_MEM;
2157 res->desc = IORES_DESC_PERSISTENT_MEMORY;
2158
2159 ret = insert_resource(&iomem_resource, res);
2160 if (ret)
2161 return ret;
2162
2163 ret = devm_add_action_or_reset(acpi_desc->dev,
2164 acpi_nfit_remove_resource,
2165 res);
2166 if (ret)
af1996ef 2167 return ret;
2168
2169 return 0;
2170}
2171
1f7df6f8 2172static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
44c462eb 2173 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
1f7df6f8 2174 struct acpi_nfit_memory_map *memdev,
1cf03c00 2175 struct nfit_spa *nfit_spa)
2176{
2177 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
2178 memdev->device_handle);
1cf03c00 2179 struct acpi_nfit_system_address *spa = nfit_spa->spa;
047fc8a1 2180 struct nd_blk_region_desc *ndbr_desc;
2181 struct nfit_mem *nfit_mem;
2182 int blk_valid = 0;
2183
2184 if (!nvdimm) {
2185 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
2186 spa->range_index, memdev->device_handle);
2187 return -ENODEV;
2188 }
2189
44c462eb 2190 mapping->nvdimm = nvdimm;
2191 switch (nfit_spa_type(spa)) {
2192 case NFIT_SPA_PM:
2193 case NFIT_SPA_VOLATILE:
2194 mapping->start = memdev->address;
2195 mapping->size = memdev->region_size;
2196 break;
2197 case NFIT_SPA_DCR:
2198 nfit_mem = nvdimm_provider_data(nvdimm);
2199 if (!nfit_mem || !nfit_mem->bdw) {
2200 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
2201 spa->range_index, nvdimm_name(nvdimm));
2202 } else {
2203 mapping->size = nfit_mem->bdw->capacity;
2204 mapping->start = nfit_mem->bdw->start_address;
5212e11f 2205 ndr_desc->num_lanes = nfit_mem->bdw->windows;
2206 blk_valid = 1;
2207 }
2208
44c462eb 2209 ndr_desc->mapping = mapping;
1f7df6f8 2210 ndr_desc->num_mappings = blk_valid;
2211 ndbr_desc = to_blk_region_desc(ndr_desc);
2212 ndbr_desc->enable = acpi_nfit_blk_region_enable;
6bc75619 2213 ndbr_desc->do_io = acpi_desc->blk_do_io;
2214 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
2215 ndr_desc);
2216 if (!nfit_spa->nd_region)
2217 return -ENOMEM;
2218 break;
2219 }
2220
2221 return 0;
2222}
2223
2224static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2225{
2226 return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2227 nfit_spa_type(spa) == NFIT_SPA_VCD ||
2228 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2229 nfit_spa_type(spa) == NFIT_SPA_PCD);
2230}
2231
2232static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2233 struct nfit_spa *nfit_spa)
2234{
44c462eb 2235 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
1f7df6f8 2236 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2237 struct nd_blk_region_desc ndbr_desc;
2238 struct nd_region_desc *ndr_desc;
1f7df6f8 2239 struct nfit_memdev *nfit_memdev;
2240 struct nvdimm_bus *nvdimm_bus;
2241 struct resource res;
eaf96153 2242 int count = 0, rc;
1f7df6f8 2243
1cf03c00 2244 if (nfit_spa->nd_region)
2245 return 0;
2246
c2f32acd 2247 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2248 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
2249 __func__);
2250 return 0;
2251 }
2252
2253 memset(&res, 0, sizeof(res));
44c462eb 2254 memset(&mappings, 0, sizeof(mappings));
047fc8a1 2255 memset(&ndbr_desc, 0, sizeof(ndbr_desc));
2256 res.start = spa->address;
2257 res.end = res.start + spa->length - 1;
2258 ndr_desc = &ndbr_desc.ndr_desc;
2259 ndr_desc->res = &res;
2260 ndr_desc->provider_data = nfit_spa;
2261 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
2262 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
2263 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
2264 spa->proximity_domain);
2265 else
2266 ndr_desc->numa_node = NUMA_NO_NODE;
2267
2268 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2269 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
44c462eb 2270 struct nd_mapping_desc *mapping;
2271
2272 if (memdev->range_index != spa->range_index)
2273 continue;
2274 if (count >= ND_MAX_MAPPINGS) {
2275 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
2276 spa->range_index, ND_MAX_MAPPINGS);
2277 return -ENXIO;
2278 }
2279 mapping = &mappings[count++];
2280 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
1cf03c00 2281 memdev, nfit_spa);
1f7df6f8 2282 if (rc)
1cf03c00 2283 goto out;
2284 }
2285
44c462eb 2286 ndr_desc->mapping = mappings;
2287 ndr_desc->num_mappings = count;
2288 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
eaf96153 2289 if (rc)
1cf03c00 2290 goto out;
eaf96153 2291
2292 nvdimm_bus = acpi_desc->nvdimm_bus;
2293 if (nfit_spa_type(spa) == NFIT_SPA_PM) {
af1996ef 2294 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
48901165 2295 if (rc) {
2296 dev_warn(acpi_desc->dev,
2297 "failed to insert pmem resource to iomem: %d\n",
2298 rc);
48901165 2299 goto out;
0caeef63 2300 }
48901165 2301
2302 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2303 ndr_desc);
2304 if (!nfit_spa->nd_region)
2305 rc = -ENOMEM;
1f7df6f8 2306 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
2307 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
2308 ndr_desc);
2309 if (!nfit_spa->nd_region)
2310 rc = -ENOMEM;
2311 } else if (nfit_spa_is_virtual(spa)) {
2312 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2313 ndr_desc);
2314 if (!nfit_spa->nd_region)
2315 rc = -ENOMEM;
1f7df6f8 2316 }
20985164 2317
2318 out:
2319 if (rc)
2320 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
2321 nfit_spa->spa->range_index);
2322 return rc;
2323}
2324
2325static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
2326 u32 max_ars)
2327{
2328 struct device *dev = acpi_desc->dev;
2329 struct nd_cmd_ars_status *ars_status;
2330
2331 if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
2332 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
2333 return 0;
2334 }
2335
2336 if (acpi_desc->ars_status)
2337 devm_kfree(dev, acpi_desc->ars_status);
2338 acpi_desc->ars_status = NULL;
2339 ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
2340 if (!ars_status)
2341 return -ENOMEM;
2342 acpi_desc->ars_status = ars_status;
2343 acpi_desc->ars_status_size = max_ars;
2344 return 0;
2345}
2346
2347static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
2348 struct nfit_spa *nfit_spa)
2349{
2350 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2351 int rc;
2352
2353 if (!nfit_spa->max_ars) {
2354 struct nd_cmd_ars_cap ars_cap;
2355
2356 memset(&ars_cap, 0, sizeof(ars_cap));
2357 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
2358 if (rc < 0)
2359 return rc;
2360 nfit_spa->max_ars = ars_cap.max_ars_out;
2361 nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
2362 /* check that the supported scrub types match the spa type */
2363 if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
2364 ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
2365 return -ENOTTY;
2366 else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
2367 ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
2368 return -ENOTTY;
2369 }
2370
2371 if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
2372 return -ENOMEM;
2373
2374 rc = ars_get_status(acpi_desc);
2375 if (rc < 0 && rc != -ENOSPC)
2376 return rc;
2377
82aa37cf 2378 if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
2379 return -ENOMEM;
2380
2381 return 0;
2382}
2383
2384static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
2385 struct nfit_spa *nfit_spa)
2386{
2387 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2388 unsigned int overflow_retry = scrub_overflow_abort;
2389 u64 init_ars_start = 0, init_ars_len = 0;
2390 struct device *dev = acpi_desc->dev;
2391 unsigned int tmo = scrub_timeout;
2392 int rc;
2393
37b137ff 2394 if (!nfit_spa->ars_required || !nfit_spa->nd_region)
2395 return;
2396
2397 rc = ars_start(acpi_desc, nfit_spa);
2398 /*
2399 * If we timed out the initial scan, we'll still be busy here,
2400 * and will wait another timeout before giving up permanently.
2401 */
2402 if (rc < 0 && rc != -EBUSY)
2403 return;
2404
2405 do {
2406 u64 ars_start, ars_len;
2407
2408 if (acpi_desc->cancel)
2409 break;
2410 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2411 if (rc == -ENOTTY)
2412 break;
2413 if (rc == -EBUSY && !tmo) {
2414 dev_warn(dev, "range %d ars timeout, aborting\n",
2415 spa->range_index);
2416 break;
2417 }
2418
2419 if (rc == -EBUSY) {
2420 /*
2421 * Note, entries may be appended to the list
2422 * while the lock is dropped, but the workqueue
2423 * being active prevents entries being deleted /
2424 * freed.
2425 */
2426 mutex_unlock(&acpi_desc->init_mutex);
2427 ssleep(1);
2428 tmo--;
2429 mutex_lock(&acpi_desc->init_mutex);
2430 continue;
2431 }
2432
2433 /* we got some results, but there are more pending... */
2434 if (rc == -ENOSPC && overflow_retry--) {
2435 if (!init_ars_len) {
2436 init_ars_len = acpi_desc->ars_status->length;
2437 init_ars_start = acpi_desc->ars_status->address;
2438 }
2439 rc = ars_continue(acpi_desc);
2440 }
2441
2442 if (rc < 0) {
2443 dev_warn(dev, "range %d ars continuation failed\n",
2444 spa->range_index);
2445 break;
2446 }
2447
2448 if (init_ars_len) {
2449 ars_start = init_ars_start;
2450 ars_len = init_ars_len;
2451 } else {
2452 ars_start = acpi_desc->ars_status->address;
2453 ars_len = acpi_desc->ars_status->length;
2454 }
2455 dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
2456 spa->range_index, ars_start, ars_len);
2457 /* notify the region about new poison entries */
2458 nvdimm_region_notify(nfit_spa->nd_region,
2459 NVDIMM_REVALIDATE_POISON);
2460 break;
2461 } while (1);
2462}
2463
2464static void acpi_nfit_scrub(struct work_struct *work)
1f7df6f8 2465{
2466 struct device *dev;
2467 u64 init_scrub_length = 0;
1f7df6f8 2468 struct nfit_spa *nfit_spa;
2469 u64 init_scrub_address = 0;
2470 bool init_ars_done = false;
2471 struct acpi_nfit_desc *acpi_desc;
2472 unsigned int tmo = scrub_timeout;
2473 unsigned int overflow_retry = scrub_overflow_abort;
2474
2475 acpi_desc = container_of(work, typeof(*acpi_desc), work);
2476 dev = acpi_desc->dev;
1f7df6f8 2477
2478 /*
2479 * We scrub in 2 phases. The first phase waits for any platform
2480 * firmware initiated scrubs to complete and then we go search for the
2481 * affected spa regions to mark them scanned. In the second phase we
2482 * initiate a directed scrub for every range that was not scrubbed in
2483 * phase 1. If we're called for a 'rescan', we harmlessly pass through
2484 * the first phase, but really only care about running phase 2, where
2485 * regions can be notified of new poison.
2486 */
2487
2488 /* process platform firmware initiated scrubs */
2489 retry:
2490 mutex_lock(&acpi_desc->init_mutex);
1f7df6f8 2491 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2492 struct nd_cmd_ars_status *ars_status;
2493 struct acpi_nfit_system_address *spa;
2494 u64 ars_start, ars_len;
2495 int rc;
1f7df6f8 2496
2497 if (acpi_desc->cancel)
2498 break;
2499
2500 if (nfit_spa->nd_region)
2501 continue;
2502
2503 if (init_ars_done) {
2504 /*
2505 * No need to re-query, we're now just
2506 * reconciling all the ranges covered by the
2507 * initial scrub
2508 */
2509 rc = 0;
2510 } else
2511 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2512
2513 if (rc == -ENOTTY) {
2514 /* no ars capability, just register spa and move on */
2515 acpi_nfit_register_region(acpi_desc, nfit_spa);
2516 continue;
2517 }
2518
2519 if (rc == -EBUSY && !tmo) {
2520 /* fallthrough to directed scrub in phase 2 */
2521 dev_warn(dev, "timeout awaiting ars results, continuing...\n");
2522 break;
2523 } else if (rc == -EBUSY) {
2524 mutex_unlock(&acpi_desc->init_mutex);
2525 ssleep(1);
2526 tmo--;
2527 goto retry;
2528 }
2529
2530 /* we got some results, but there are more pending... */
2531 if (rc == -ENOSPC && overflow_retry--) {
2532 ars_status = acpi_desc->ars_status;
2533 /*
2534 * Record the original scrub range, so that we
2535 * can recall all the ranges impacted by the
2536 * initial scrub.
2537 */
2538 if (!init_scrub_length) {
2539 init_scrub_length = ars_status->length;
2540 init_scrub_address = ars_status->address;
2541 }
2542 rc = ars_continue(acpi_desc);
2543 if (rc == 0) {
2544 mutex_unlock(&acpi_desc->init_mutex);
2545 goto retry;
2546 }
2547 }
2548
2549 if (rc < 0) {
2550 /*
2551 * Initial scrub failed, we'll give it one more
2552 * try below...
2553 */
2554 break;
2555 }
2556
2557 /* We got some final results, record completed ranges */
2558 ars_status = acpi_desc->ars_status;
2559 if (init_scrub_length) {
2560 ars_start = init_scrub_address;
2561 ars_len = ars_start + init_scrub_length;
2562 } else {
2563 ars_start = ars_status->address;
2564 ars_len = ars_status->length;
2565 }
2566 spa = nfit_spa->spa;
2567
2568 if (!init_ars_done) {
2569 init_ars_done = true;
2570 dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
2571 ars_start, ars_len);
2572 }
2573 if (ars_start <= spa->address && ars_start + ars_len
2574 >= spa->address + spa->length)
2575 acpi_nfit_register_region(acpi_desc, nfit_spa);
1f7df6f8 2576 }
2577
2578 /*
2579 * For all the ranges not covered by an initial scrub we still
2580 * want to see if there are errors, but it's ok to discover them
2581 * asynchronously.
2582 */
2583 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2584 /*
2585 * Flag all the ranges that still need scrubbing, but
2586 * register them now to make data available.
2587 */
2588 if (!nfit_spa->nd_region) {
2589 nfit_spa->ars_required = 1;
1cf03c00 2590 acpi_nfit_register_region(acpi_desc, nfit_spa);
37b137ff 2591 }
1cf03c00 2592 }
9ccaed4b 2593 acpi_desc->init_complete = 1;
2594
2595 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2596 acpi_nfit_async_scrub(acpi_desc, nfit_spa);
2597 acpi_desc->scrub_count++;
2598 if (acpi_desc->scrub_count_state)
2599 sysfs_notify_dirent(acpi_desc->scrub_count_state);
2600 mutex_unlock(&acpi_desc->init_mutex);
2601}
2602
2603static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
2604{
2605 struct nfit_spa *nfit_spa;
2606 int rc;
2607
2608 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2609 if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
2610 /* BLK regions don't need to wait for ars results */
2611 rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
2612 if (rc)
2613 return rc;
2614 }
2615
2616 if (!acpi_desc->cancel)
2617 queue_work(nfit_wq, &acpi_desc->work);
2618 return 0;
2619}
2620
2621static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
2622 struct nfit_table_prev *prev)
2623{
2624 struct device *dev = acpi_desc->dev;
2625
2626 if (!list_empty(&prev->spas) ||
2627 !list_empty(&prev->memdevs) ||
2628 !list_empty(&prev->dcrs) ||
2629 !list_empty(&prev->bdws) ||
2630 !list_empty(&prev->idts) ||
2631 !list_empty(&prev->flushes)) {
2632 dev_err(dev, "new nfit deletes entries (unsupported)\n");
2633 return -ENXIO;
2634 }
2635 return 0;
2636}
2637
2638static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
2639{
2640 struct device *dev = acpi_desc->dev;
2641 struct kernfs_node *nfit;
2642 struct device *bus_dev;
2643
2644 if (!ars_supported(acpi_desc->nvdimm_bus))
2645 return 0;
2646
2647 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
2648 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
2649 if (!nfit) {
2650 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
2651 return -ENODEV;
2652 }
2653 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
2654 sysfs_put(nfit);
2655 if (!acpi_desc->scrub_count_state) {
2656 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
2657 return -ENODEV;
2658 }
2659
2660 return 0;
2661}
2662
fbabd829 2663static void acpi_nfit_unregister(void *data)
2664{
2665 struct acpi_nfit_desc *acpi_desc = data;
2666
58cd71b4 2667 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
2668}
2669
e7a11b44 2670int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
2671{
2672 struct device *dev = acpi_desc->dev;
20985164 2673 struct nfit_table_prev prev;
b94d5230 2674 const void *end;
1f7df6f8 2675 int rc;
b94d5230 2676
58cd71b4 2677 if (!acpi_desc->nvdimm_bus) {
2678 acpi_nfit_init_dsms(acpi_desc);
2679
2680 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
2681 &acpi_desc->nd_desc);
2682 if (!acpi_desc->nvdimm_bus)
2683 return -ENOMEM;
37b137ff 2684
fbabd829 2685 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
2686 acpi_desc);
2687 if (rc)
2688 return rc;
2689
2690 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
2691 if (rc)
2692 return rc;
2693
2694 /* register this acpi_desc for mce notifications */
2695 mutex_lock(&acpi_desc_lock);
2696 list_add_tail(&acpi_desc->list, &acpi_descs);
2697 mutex_unlock(&acpi_desc_lock);
2698 }
2699
2700 mutex_lock(&acpi_desc->init_mutex);
2701
2702 INIT_LIST_HEAD(&prev.spas);
2703 INIT_LIST_HEAD(&prev.memdevs);
2704 INIT_LIST_HEAD(&prev.dcrs);
2705 INIT_LIST_HEAD(&prev.bdws);
2706 INIT_LIST_HEAD(&prev.idts);
2707 INIT_LIST_HEAD(&prev.flushes);
2708
2709 list_cut_position(&prev.spas, &acpi_desc->spas,
2710 acpi_desc->spas.prev);
2711 list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
2712 acpi_desc->memdevs.prev);
2713 list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
2714 acpi_desc->dcrs.prev);
2715 list_cut_position(&prev.bdws, &acpi_desc->bdws,
2716 acpi_desc->bdws.prev);
2717 list_cut_position(&prev.idts, &acpi_desc->idts,
2718 acpi_desc->idts.prev);
2719 list_cut_position(&prev.flushes, &acpi_desc->flushes,
2720 acpi_desc->flushes.prev);
b94d5230 2721
b94d5230 2722 end = data + sz;
b94d5230 2723 while (!IS_ERR_OR_NULL(data))
20985164 2724 data = add_table(acpi_desc, &prev, data, end);
2725
2726 if (IS_ERR(data)) {
2727 dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
2728 PTR_ERR(data));
2729 rc = PTR_ERR(data);
2730 goto out_unlock;
2731 }
2732
2733 rc = acpi_nfit_check_deletions(acpi_desc, &prev);
2734 if (rc)
2735 goto out_unlock;
2736
2737 rc = nfit_mem_init(acpi_desc);
2738 if (rc)
20985164 2739 goto out_unlock;
62232e45 2740
2741 rc = acpi_nfit_register_dimms(acpi_desc);
2742 if (rc)
2743 goto out_unlock;
2744
2745 rc = acpi_nfit_register_regions(acpi_desc);
1f7df6f8 2746
2747 out_unlock:
2748 mutex_unlock(&acpi_desc->init_mutex);
2749 return rc;
b94d5230 2750}
6bc75619 2751EXPORT_SYMBOL_GPL(acpi_nfit_init);
b94d5230 2752
2753struct acpi_nfit_flush_work {
2754 struct work_struct work;
2755 struct completion cmp;
2756};
2757
2758static void flush_probe(struct work_struct *work)
2759{
2760 struct acpi_nfit_flush_work *flush;
2761
2762 flush = container_of(work, typeof(*flush), work);
2763 complete(&flush->cmp);
2764}
2765
2766static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
2767{
2768 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2769 struct device *dev = acpi_desc->dev;
2770 struct acpi_nfit_flush_work flush;
e471486c 2771 int rc;
2772
2773 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
2774 device_lock(dev);
2775 device_unlock(dev);
2776
2777 /* bounce the init_mutex to make init_complete valid */
2778 mutex_lock(&acpi_desc->init_mutex);
2779 if (acpi_desc->cancel || acpi_desc->init_complete) {
2780 mutex_unlock(&acpi_desc->init_mutex);
9ccaed4b 2781 return 0;
fbabd829 2782 }
9ccaed4b 2783
2784 /*
2785 * Scrub work could take tens of seconds; userspace may give up, so we
2786 * need to be interruptible while waiting.
2787 */
2788 INIT_WORK_ONSTACK(&flush.work, flush_probe);
2789 COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
2790 queue_work(nfit_wq, &flush.work);
fbabd829 2791 mutex_unlock(&acpi_desc->init_mutex);
2792
2793 rc = wait_for_completion_interruptible(&flush.cmp);
2794 cancel_work_sync(&flush.work);
2795 return rc;
2796}
2797
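/*
 * The on-stack work item above exists purely to flush nfit_wq: the
 * workqueue is single-threaded, so by the time flush_probe() runs,
 * any scrub queued before it has finished. Waiting interruptibly
 * lets userspace abandon a long ARS instead of blocking in probe.
 */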
2798static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
2799 struct nvdimm *nvdimm, unsigned int cmd)
2800{
2801 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2802
2803 if (nvdimm)
2804 return 0;
2805 if (cmd != ND_CMD_ARS_START)
2806 return 0;
2807
2808 /*
2809 * The kernel and userspace may race to initiate a scrub, but
2810 * the scrub thread is prepared to lose that initial race. It
2811 * just needs guarantees that any ars it initiates are not
2812 * interrupted by any intervening start requests from userspace.
2813 */
2814 if (work_busy(&acpi_desc->work))
2815 return -EBUSY;
2816
2817 return 0;
2818}
2819
6839a6d9 2820int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
2821{
2822 struct device *dev = acpi_desc->dev;
2823 struct nfit_spa *nfit_spa;
2824
2825 if (work_busy(&acpi_desc->work))
2826 return -EBUSY;
2827
2828 mutex_lock(&acpi_desc->init_mutex);
2829 if (acpi_desc->cancel) {
2830 mutex_unlock(&acpi_desc->init_mutex);
37b137ff 2831 return 0;
fbabd829 2832 }
37b137ff 2833
2834 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2835 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2836
2837 if (nfit_spa_type(spa) != NFIT_SPA_PM)
2838 continue;
2839
2840 nfit_spa->ars_required = 1;
2841 }
2842 queue_work(nfit_wq, &acpi_desc->work);
2843 dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
2844 mutex_unlock(&acpi_desc->init_mutex);
2845
2846 return 0;
2847}
2848
a61fe6f7 2849void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
2850{
2851 struct nvdimm_bus_descriptor *nd_desc;
2852
2853 dev_set_drvdata(dev, acpi_desc);
2854 acpi_desc->dev = dev;
6bc75619 2855 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
2856 nd_desc = &acpi_desc->nd_desc;
2857 nd_desc->provider_name = "ACPI.NFIT";
bc9775d8 2858 nd_desc->module = THIS_MODULE;
b94d5230 2859 nd_desc->ndctl = acpi_nfit_ctl;
7ae0fa43 2860 nd_desc->flush_probe = acpi_nfit_flush_probe;
87bf572e 2861 nd_desc->clear_to_send = acpi_nfit_clear_to_send;
45def22c 2862 nd_desc->attr_groups = acpi_nfit_attribute_groups;
b94d5230 2863
2864 INIT_LIST_HEAD(&acpi_desc->spas);
2865 INIT_LIST_HEAD(&acpi_desc->dcrs);
2866 INIT_LIST_HEAD(&acpi_desc->bdws);
2867 INIT_LIST_HEAD(&acpi_desc->idts);
2868 INIT_LIST_HEAD(&acpi_desc->flushes);
2869 INIT_LIST_HEAD(&acpi_desc->memdevs);
2870 INIT_LIST_HEAD(&acpi_desc->dimms);
6839a6d9 2871 INIT_LIST_HEAD(&acpi_desc->list);
20985164 2872 mutex_init(&acpi_desc->init_mutex);
1cf03c00 2873 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
20985164 2874}
a61fe6f7 2875EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
20985164 2876
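/*
 * Minimal sketch of how a second user of this library (e.g. a test
 * harness) would drive the exported entry points; nfit_buf and
 * nfit_size are hypothetical names for the caller's own table copy:
 *
 *	acpi_nfit_desc_init(acpi_desc, dev);
 *	rc = acpi_nfit_init(acpi_desc, nfit_buf, nfit_size);
 *	if (rc == 0)
 *		rc = devm_add_action_or_reset(dev, acpi_nfit_shutdown,
 *				acpi_desc);
 */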
2877static void acpi_nfit_put_table(void *table)
2878{
2879 acpi_put_table(table);
2880}
2881
2882void acpi_nfit_shutdown(void *data)
2883{
2884 struct acpi_nfit_desc *acpi_desc = data;
2885 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
2886
2887 /*
2888 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
2889 * race teardown
2890 */
2891 mutex_lock(&acpi_desc_lock);
2892 list_del(&acpi_desc->list);
2893 mutex_unlock(&acpi_desc_lock);
2894
2895 mutex_lock(&acpi_desc->init_mutex);
2896 acpi_desc->cancel = 1;
2897 mutex_unlock(&acpi_desc->init_mutex);
2898
2899 /*
2900 * Bounce the nvdimm bus lock to make sure any in-flight
2901 * acpi_nfit_ars_rescan() submissions have had a chance to
2902 * either submit or see ->cancel set.
2903 */
2904 device_lock(bus_dev);
2905 device_unlock(bus_dev);
2906
2907 flush_workqueue(nfit_wq);
2908}
2909EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
2910
2911static int acpi_nfit_add(struct acpi_device *adev)
2912{
2913 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
2914 struct acpi_nfit_desc *acpi_desc;
2915 struct device *dev = &adev->dev;
2916 struct acpi_table_header *tbl;
2917 acpi_status status = AE_OK;
2918 acpi_size sz;
31932041 2919 int rc = 0;
20985164 2920
6b11d1d6 2921 status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
2922 if (ACPI_FAILURE(status)) {
2923 /* This is ok, we could have an nvdimm hotplugged later */
2924 dev_dbg(dev, "failed to find NFIT at startup\n");
2925 return 0;
2926 }
2927
2928 rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
2929 if (rc)
2930 return rc;
6b11d1d6 2931 sz = tbl->length;
20985164 2932
2933 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
2934 if (!acpi_desc)
2935 return -ENOMEM;
2936 acpi_nfit_desc_init(acpi_desc, &adev->dev);
20985164 2937
e7a11b44 2938 /* Save the acpi header for exporting the revision via sysfs */
6b577c9d 2939 acpi_desc->acpi_header = *tbl;
2940
2941 /* Evaluate _FIT and override with that if present */
2942 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
2943 if (ACPI_SUCCESS(status) && buf.length > 0) {
2944 union acpi_object *obj = buf.pointer;
2945
2946 if (obj->type == ACPI_TYPE_BUFFER)
2947 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
2948 obj->buffer.length);
2949 else
2950 dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
2951 __func__, (int) obj->type);
2952 kfree(buf.pointer);
2953 } else
2954 /* skip over the lead-in header table */
2955 rc = acpi_nfit_init(acpi_desc, (void *) tbl
2956 + sizeof(struct acpi_table_nfit),
2957 sz - sizeof(struct acpi_table_nfit));
2958
2959 if (rc)
2960 return rc;
2961 return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
2962}
2963
2964static int acpi_nfit_remove(struct acpi_device *adev)
2965{
fbabd829 2966 /* see acpi_nfit_unregister */
2967 return 0;
2968}
2969
c14a868a 2970void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
20985164 2971{
c14a868a 2972 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
20985164 2973 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
e7a11b44 2974 union acpi_object *obj;
2975 acpi_status status;
2976 int ret;
2977
2978 dev_dbg(dev, "%s: event: %d\n", __func__, event);
2979
2980 if (event != NFIT_NOTIFY_UPDATE)
2981 return;
2982
2983 if (!dev->driver) {
2984 /* dev->driver may be null if we're being removed */
2985 dev_dbg(dev, "%s: no driver found for dev\n", __func__);
c14a868a 2986 return;
2987 }
2988
2989 if (!acpi_desc) {
2990 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
2991 if (!acpi_desc)
2992 return;
2993 acpi_nfit_desc_init(acpi_desc, dev);
2994 } else {
2995 /*
2996 * Finish previous registration before considering new
2997 * regions.
2998 */
2999 flush_workqueue(nfit_wq);
3000 }
3001
3002 /* Evaluate _FIT */
c14a868a 3003 status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
3004 if (ACPI_FAILURE(status)) {
3005 dev_err(dev, "failed to evaluate _FIT\n");
c14a868a 3006 return;
3007 }
3008
3009 obj = buf.pointer;
3010 if (obj->type == ACPI_TYPE_BUFFER) {
3011 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3012 obj->buffer.length);
31932041 3013 if (ret)
6b577c9d 3014 dev_err(dev, "failed to merge updated NFIT\n");
31932041 3015 } else
6b577c9d 3016 dev_err(dev, "Invalid _FIT\n");
20985164 3017 kfree(buf.pointer);
3018}
3019EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
20985164 3020
3021static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
3022{
3023 device_lock(&adev->dev);
3024 __acpi_nfit_notify(&adev->dev, adev->handle, event);
3025 device_unlock(&adev->dev);
3026}
3027
3028static const struct acpi_device_id acpi_nfit_ids[] = {
3029 { "ACPI0012", 0 },
3030 { "", 0 },
3031};
3032MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
3033
3034static struct acpi_driver acpi_nfit_driver = {
3035 .name = KBUILD_MODNAME,
3036 .ids = acpi_nfit_ids,
3037 .ops = {
3038 .add = acpi_nfit_add,
3039 .remove = acpi_nfit_remove,
20985164 3040 .notify = acpi_nfit_notify,
3041 },
3042};
3043
3044static __init int nfit_init(void)
3045{
3046 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
3047 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
3048 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
3049 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
3050 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
3051 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
3052 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
3053
3054 guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
3055 guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
3056 guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
3057 guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
3058 guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
3059 guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
3060 guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
3061 guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
3062 guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
3063 guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
3064 guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
3065 guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
3066 guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
b94d5230 3067
3068 nfit_wq = create_singlethread_workqueue("nfit");
3069 if (!nfit_wq)
3070 return -ENOMEM;
3071
3072 nfit_mce_register();
3073
3074 return acpi_bus_register_driver(&acpi_nfit_driver);
3075}
3076
3077static __exit void nfit_exit(void)
3078{
6839a6d9 3079 nfit_mce_unregister();
b94d5230 3080 acpi_bus_unregister_driver(&acpi_nfit_driver);
7ae0fa43 3081 destroy_workqueue(nfit_wq);
6839a6d9 3082 WARN_ON(!list_empty(&acpi_descs));
3083}
3084
3085module_init(nfit_init);
3086module_exit(nfit_exit);
3087MODULE_LICENSE("GPL v2");
3088MODULE_AUTHOR("Intel Corporation");