/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set\n");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");
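
/*
 * Example (illustrative values): booting with
 * "nfit.override_dsm_mask=0x3fe" pins every DIMM to the standard
 * NVDIMM_FAMILY_INTEL function set regardless of what _DSM reports.
 * The override is only honored while disable_vendor_specific is clear;
 * see acpi_nfit_add_dimm() below.
 */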

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

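/*
 * Translate the firmware status word of a bus command into an errno.
 * Per the NVDIMM _DSM interface, the low 16 bits of 'status' carry the
 * command completion status and the upper 16 bits carry
 * command-specific extended status.
 */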
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop. If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
					& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	if (status)
		return -EIO;
	return 0;
}

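/*
 * Common entry point for bus and dimm commands: marshal a libnvdimm
 * ioctl payload into a _DSM evaluation. ND_CMD_CALL is the pass-through
 * case: 'buf' is an nd_cmd_pkg envelope and the wrapped payload is
 * handed to the numbered DSM function as-is, with the output size
 * reported back via nd_fw_size.
 */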
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const u8 *uuid;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
				__func__, dimm_name, cmd, func,
				in_buf.buffer.length);
		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
				out_obj->buffer.pointer,
				min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
			|| (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

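/*
 * struct acpi_nfit_interleave ends in a one-element line_offset[] array,
 * so the full table size is the base structure plus one u32 per line
 * beyond the first.
 */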
static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

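/*
 * As with sizeof_idt(), account for the one-element hint_address[]
 * array at the end of struct acpi_nfit_flush_address.
 */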
static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm. For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
					flush->hint_count
					* sizeof(struct resource), GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s). From each MEMDEV find the
	 * corresponding DCR. Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR. Throw it all into an nfit_mem object. Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *	the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *	received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);
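
/*
 * Example usage (illustrative; the ndbusN name varies by platform):
 *
 *	echo 1 > /sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub
 */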

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");
	}
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);
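
/*
 * Example usage (illustrative; the ndbusN name varies by platform):
 *
 *	cat /sys/bus/nd/devices/ndbus0/nfit/scrub	-> "2+" (third ARS running)
 *	echo 1 > /sys/bus/nd/devices/ndbus0/nfit/scrub	(request a rescan)
 */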

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		/* stop searching once the second interface code is found */
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev))
		return 0;
	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const u8 *uuid;
	int i;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
			ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets. Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this uuid.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			break;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = i;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = 0x3fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	uuid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm)
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail" : "",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");

	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map_compat(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

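/*
 * The interleave-set cookie is a fletcher64 checksum over the sorted
 * (region_offset, serial_number) tuples of all mappings in the set,
 * which lets a namespace be matched to the same physical DIMM
 * configuration across reboots.
 */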
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* support namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}

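/*
 * Worked example (illustrative numbers): with line_size = 256 and
 * num_lines = 2, offset 1280 yields line_no = 5 and sub_line_offset = 0,
 * hence line_index = 1 and table_skip_count = 2. The translated offset
 * is base_offset + line_offset[1] * 256 + 2 * table_size.
 */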
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}

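/*
 * Issue a block-window command: bits 0-47 of the doorbell hold the
 * target DPA in cache-line units, bits 48-55 the transfer length in
 * cache lines, and bit 56 the write flag. The write is flushed, and
 * read back when the DIMM requires a latch, before touching the
 * aperture.
 */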
1727static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
1728 resource_size_t dpa, unsigned int len, unsigned int write)
1729{
1730 u64 cmd, offset;
1731 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
1732
1733 enum {
1734 BCW_OFFSET_MASK = (1ULL << 48)-1,
1735 BCW_LEN_SHIFT = 48,
1736 BCW_LEN_MASK = (1ULL << 8) - 1,
1737 BCW_CMD_SHIFT = 56,
1738 };
1739
1740 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
1741 len = len >> L1_CACHE_SHIFT;
1742 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
1743 cmd |= ((u64) write) << BCW_CMD_SHIFT;
1744
1745 offset = nfit_blk->cmd_offset + mmio->size * bw;
1746 if (mmio->num_lines)
1747 offset = to_interleave_offset(offset, mmio);
1748
67a3e8fe 1749 writeq(cmd, mmio->addr.base + offset);
f284a4f2 1750 nvdimm_flush(nfit_blk->nd_region);
f0f2c072 1751
aef25338 1752 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
67a3e8fe 1753 readq(mmio->addr.base + offset);
047fc8a1
RZ
1754}
1755
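/*
 * Shape of the resulting block control word, derived from the shifts
 * above (an illustration of this routine, not a restatement of the
 * DSM specification):
 *
 *	bits  0..47	DPA in cache-line units (dpa >> L1_CACHE_SHIFT)
 *	bits 48..55	transfer length in cache-line units
 *	bit  56		1 = write, 0 = read
 *
 * e.g. a 256-byte read at dpa 0x1000 with 64-byte cache lines encodes
 * as (0x1000 >> 6) | (256 >> 6) << 48 == 0x0004000000000040.
 */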
1756static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
1757 resource_size_t dpa, void *iobuf, size_t len, int rw,
1758 unsigned int lane)
1759{
1760 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1761 unsigned int copied = 0;
1762 u64 base_offset;
1763 int rc;
1764
1765 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
1766 + lane * mmio->size;
047fc8a1
RZ
1767 write_blk_ctl(nfit_blk, lane, dpa, len, rw);
1768 while (len) {
1769 unsigned int c;
1770 u64 offset;
1771
1772 if (mmio->num_lines) {
1773 u32 line_offset;
1774
1775 offset = to_interleave_offset(base_offset + copied,
1776 mmio);
1777 div_u64_rem(offset, mmio->line_size, &line_offset);
1778 c = min_t(size_t, len, mmio->line_size - line_offset);
1779 } else {
1780 offset = base_offset + nfit_blk->bdw_offset;
1781 c = len;
1782 }
1783
1784 if (rw)
67a3e8fe 1785 memcpy_to_pmem(mmio->addr.aperture + offset,
c2ad2954 1786 iobuf + copied, c);
67a3e8fe 1787 else {
aef25338 1788 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
67a3e8fe
RZ
1789 mmio_flush_range((void __force *)
1790 mmio->addr.aperture + offset, c);
1791
c2ad2954 1792 memcpy_from_pmem(iobuf + copied,
67a3e8fe
RZ
1793 mmio->addr.aperture + offset, c);
1794 }
047fc8a1
RZ
1795
1796 copied += c;
1797 len -= c;
1798 }
c2ad2954
RZ
1799
1800 if (rw)
f284a4f2 1801 nvdimm_flush(nfit_blk->nd_region);
c2ad2954 1802
047fc8a1
RZ
1803 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
1804 return rc;
1805}
1806
1807static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
1808 resource_size_t dpa, void *iobuf, u64 len, int rw)
1809{
1810 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
1811 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1812 struct nd_region *nd_region = nfit_blk->nd_region;
1813 unsigned int lane, copied = 0;
1814 int rc = 0;
1815
1816 lane = nd_region_acquire_lane(nd_region);
1817 while (len) {
1818 u64 c = min(len, mmio->size);
1819
1820 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
1821 iobuf + copied, c, rw, lane);
1822 if (rc)
1823 break;
1824
1825 copied += c;
1826 len -= c;
1827 }
1828 nd_region_release_lane(nd_region, lane);
1829
1830 return rc;
1831}
1832
047fc8a1
RZ
1833static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
1834 struct acpi_nfit_interleave *idt, u16 interleave_ways)
1835{
1836 if (idt) {
1837 mmio->num_lines = idt->line_count;
1838 mmio->line_size = idt->line_size;
1839 if (interleave_ways == 0)
1840 return -ENXIO;
1841 mmio->table_size = mmio->num_lines * interleave_ways
1842 * mmio->line_size;
1843 }
1844
1845 return 0;
1846}
1847
f0f2c072
RZ
1848static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
1849 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
1850{
1851 struct nd_cmd_dimm_flags flags;
1852 int rc;
1853
1854 memset(&flags, 0, sizeof(flags));
1855 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
aef25338 1856 sizeof(flags), NULL);
f0f2c072
RZ
1857
1858 if (rc >= 0 && flags.status == 0)
1859 nfit_blk->dimm_flags = flags.flags;
1860 else if (rc == -ENOTTY) {
1861 /* fall back to a conservative default */
aef25338 1862 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
f0f2c072
RZ
1863 rc = 0;
1864 } else
1865 rc = -ENXIO;
1866
1867 return rc;
1868}
1869
047fc8a1
RZ
1870static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1871 struct device *dev)
1872{
1873 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
047fc8a1
RZ
1874 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
1875 struct nfit_blk_mmio *mmio;
1876 struct nfit_blk *nfit_blk;
1877 struct nfit_mem *nfit_mem;
1878 struct nvdimm *nvdimm;
1879 int rc;
1880
1881 nvdimm = nd_blk_region_to_dimm(ndbr);
1882 nfit_mem = nvdimm_provider_data(nvdimm);
1883 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
1884 dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
1885 nfit_mem ? "" : " nfit_mem",
193ccca4
DW
1886 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
1887 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
047fc8a1
RZ
1888 return -ENXIO;
1889 }
1890
1891 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
1892 if (!nfit_blk)
1893 return -ENOMEM;
1894 nd_blk_region_set_provider_data(ndbr, nfit_blk);
1895 nfit_blk->nd_region = to_nd_region(dev);
1896
1897 /* map block aperture memory */
1898 nfit_blk->bdw_offset = nfit_mem->bdw->offset;
1899 mmio = &nfit_blk->mmio[BDW];
29b9aa0a
DW
1900 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
1901 nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM);
67a3e8fe 1902 if (!mmio->addr.base) {
047fc8a1
RZ
1903 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
1904 nvdimm_name(nvdimm));
1905 return -ENOMEM;
1906 }
1907 mmio->size = nfit_mem->bdw->size;
1908 mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
1909 mmio->idt = nfit_mem->idt_bdw;
1910 mmio->spa = nfit_mem->spa_bdw;
1911 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
1912 nfit_mem->memdev_bdw->interleave_ways);
1913 if (rc) {
1914 dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
1915 __func__, nvdimm_name(nvdimm));
1916 return rc;
1917 }
1918
1919 /* map block control memory */
1920 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
1921 nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
1922 mmio = &nfit_blk->mmio[DCR];
29b9aa0a
DW
1923 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
1924 nfit_mem->spa_dcr->length);
67a3e8fe 1925 if (!mmio->addr.base) {
047fc8a1
RZ
1926 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
1927 nvdimm_name(nvdimm));
1928 return -ENOMEM;
1929 }
1930 mmio->size = nfit_mem->dcr->window_size;
1931 mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
1932 mmio->idt = nfit_mem->idt_dcr;
1933 mmio->spa = nfit_mem->spa_dcr;
1934 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
1935 nfit_mem->memdev_dcr->interleave_ways);
1936 if (rc) {
1937 dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
1938 __func__, nvdimm_name(nvdimm));
1939 return rc;
1940 }
1941
f0f2c072
RZ
1942 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
1943 if (rc < 0) {
1944		dev_dbg(dev, "%s: %s failed to get DIMM flags\n",
1945 __func__, nvdimm_name(nvdimm));
1946 return rc;
1947 }
1948
f284a4f2 1949 if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
c2ad2954
RZ
1950 dev_warn(dev, "unable to guarantee persistence of writes\n");
1951
047fc8a1
RZ
1952 if (mmio->line_size == 0)
1953 return 0;
1954
1955 if ((u32) nfit_blk->cmd_offset % mmio->line_size
1956 + 8 > mmio->line_size) {
1957 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
1958 return -ENXIO;
1959 } else if ((u32) nfit_blk->stat_offset % mmio->line_size
1960 + 8 > mmio->line_size) {
1961 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
1962 return -ENXIO;
1963 }
1964
1965 return 0;
1966}
1967
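/*
 * Recap of the enable sequence above: memremap the BDW aperture,
 * ioremap the DCR registers, initialize both interleave decodes,
 * query the DIMM flags (falling back to latch + read-flush when the
 * command is unimplemented), and finally reject geometries where an
 * 8-byte command or status register would straddle an interleave
 * line, since a single register access cannot span two lines.
 */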
aef25338 1968static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
1cf03c00 1969 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
0caeef63 1970{
aef25338 1971 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1cf03c00 1972 struct acpi_nfit_system_address *spa = nfit_spa->spa;
aef25338
DW
1973 int cmd_rc, rc;
1974
1cf03c00
DW
1975 cmd->address = spa->address;
1976 cmd->length = spa->length;
aef25338
DW
1977 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
1978 sizeof(*cmd), &cmd_rc);
1979 if (rc < 0)
1980 return rc;
1cf03c00 1981 return cmd_rc;
0caeef63
VV
1982}
1983
1cf03c00 1984static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
0caeef63
VV
1985{
1986 int rc;
1cf03c00
DW
1987 int cmd_rc;
1988 struct nd_cmd_ars_start ars_start;
1989 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1990 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
0caeef63 1991
1cf03c00
DW
1992 memset(&ars_start, 0, sizeof(ars_start));
1993 ars_start.address = spa->address;
1994 ars_start.length = spa->length;
1995 if (nfit_spa_type(spa) == NFIT_SPA_PM)
1996 ars_start.type = ND_ARS_PERSISTENT;
1997 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
1998 ars_start.type = ND_ARS_VOLATILE;
1999 else
2000 return -ENOTTY;
aef25338 2001
1cf03c00
DW
2002 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2003 sizeof(ars_start), &cmd_rc);
aef25338 2004
1cf03c00
DW
2005 if (rc < 0)
2006 return rc;
2007 return cmd_rc;
0caeef63
VV
2008}
2009
1cf03c00 2010static int ars_continue(struct acpi_nfit_desc *acpi_desc)
0caeef63 2011{
aef25338 2012 int rc, cmd_rc;
1cf03c00
DW
2013 struct nd_cmd_ars_start ars_start;
2014 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2015 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2016
2017 memset(&ars_start, 0, sizeof(ars_start));
2018 ars_start.address = ars_status->restart_address;
2019 ars_start.length = ars_status->restart_length;
2020 ars_start.type = ars_status->type;
2021 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2022 sizeof(ars_start), &cmd_rc);
2023 if (rc < 0)
2024 return rc;
2025 return cmd_rc;
2026}
0caeef63 2027
1cf03c00
DW
2028static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2029{
2030 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2031 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2032 int rc, cmd_rc;
aef25338 2033
1cf03c00
DW
2034 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2035 acpi_desc->ars_status_size, &cmd_rc);
2036 if (rc < 0)
2037 return rc;
2038 return cmd_rc;
0caeef63
VV
2039}
2040
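/*
 * How the four ARS helpers above chain together, in sketch form; the
 * authoritative sequencing (timeouts, cancellation, overflow retries)
 * lives in acpi_nfit_query_poison() and acpi_nfit_scrub() below:
 *
 *	ars_get_cap(acpi_desc, &ars_cap, nfit_spa);	(learn max_ars_out)
 *	ars_status_alloc(acpi_desc, ars_cap.max_ars_out);
 *	ars_start(acpi_desc, nfit_spa);
 *	do {
 *		rc = ars_get_status(acpi_desc);
 *		if (rc == -EBUSY)
 *			ssleep(1);	(scrub still in flight)
 *	} while (rc == -EBUSY);
 *	if (rc == -ENOSPC)	(results overflowed the buffer)
 *		rc = ars_continue(acpi_desc);
 */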
82aa37cf 2041static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
1cf03c00 2042 struct nd_cmd_ars_status *ars_status)
0caeef63 2043{
82aa37cf 2044 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
0caeef63
VV
2045 int rc;
2046 u32 i;
2047
82aa37cf
DW
2048 /*
2049 * First record starts at 44 byte offset from the start of the
2050 * payload.
2051 */
2052 if (ars_status->out_length < 44)
2053 return 0;
0caeef63 2054 for (i = 0; i < ars_status->num_records; i++) {
82aa37cf
DW
2055 /* only process full records */
2056 if (ars_status->out_length
2057 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2058 break;
0caeef63
VV
2059 rc = nvdimm_bus_add_poison(nvdimm_bus,
2060 ars_status->records[i].err_address,
2061 ars_status->records[i].length);
2062 if (rc)
2063 return rc;
2064 }
82aa37cf
DW
2065 if (i < ars_status->num_records)
2066 dev_warn(acpi_desc->dev, "detected truncated ars results\n");
0caeef63
VV
2067
2068 return 0;
2069}
2070
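/*
 * Where the 44 above comes from, assuming out_length excludes the
 * leading 4-byte status field: out_length itself (4) + address (8) +
 * length (8) + restart_address (8) + restart_length (8) + type (2) +
 * flags (2) + num_records (4) = 44 bytes of header, with the packed
 * nd_ars_record entries starting immediately after.
 */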
af1996ef
TK
2071static void acpi_nfit_remove_resource(void *data)
2072{
2073 struct resource *res = data;
2074
2075 remove_resource(res);
2076}
2077
2078static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2079 struct nd_region_desc *ndr_desc)
2080{
2081 struct resource *res, *nd_res = ndr_desc->res;
2082 int is_pmem, ret;
2083
2084 /* No operation if the region is already registered as PMEM */
2085 is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2086 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2087 if (is_pmem == REGION_INTERSECTS)
2088 return 0;
2089
2090 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2091 if (!res)
2092 return -ENOMEM;
2093
2094 res->name = "Persistent Memory";
2095 res->start = nd_res->start;
2096 res->end = nd_res->end;
2097 res->flags = IORESOURCE_MEM;
2098 res->desc = IORES_DESC_PERSISTENT_MEMORY;
2099
2100 ret = insert_resource(&iomem_resource, res);
2101 if (ret)
2102 return ret;
2103
d932dd2c
SV
2104 ret = devm_add_action_or_reset(acpi_desc->dev,
2105 acpi_nfit_remove_resource,
2106 res);
2107 if (ret)
af1996ef 2108 return ret;
af1996ef
TK
2109
2110 return 0;
2111}
2112
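/*
 * Net effect, with illustrative addresses: a successful insert makes
 * the range visible in /proc/iomem as, e.g.,
 *
 *	240000000-43fffffff : Persistent Memory
 *
 * while the devm action ensures remove_resource() runs at teardown so
 * a later re-probe does not collide with a stale entry.
 */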
1f7df6f8 2113static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
44c462eb 2114 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
1f7df6f8 2115 struct acpi_nfit_memory_map *memdev,
1cf03c00 2116 struct nfit_spa *nfit_spa)
1f7df6f8
DW
2117{
2118 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
2119 memdev->device_handle);
1cf03c00 2120 struct acpi_nfit_system_address *spa = nfit_spa->spa;
047fc8a1 2121 struct nd_blk_region_desc *ndbr_desc;
1f7df6f8
DW
2122 struct nfit_mem *nfit_mem;
2123 int blk_valid = 0;
2124
2125 if (!nvdimm) {
2126 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
2127 spa->range_index, memdev->device_handle);
2128 return -ENODEV;
2129 }
2130
44c462eb 2131 mapping->nvdimm = nvdimm;
1f7df6f8
DW
2132 switch (nfit_spa_type(spa)) {
2133 case NFIT_SPA_PM:
2134 case NFIT_SPA_VOLATILE:
44c462eb
DW
2135 mapping->start = memdev->address;
2136 mapping->size = memdev->region_size;
1f7df6f8
DW
2137 break;
2138 case NFIT_SPA_DCR:
2139 nfit_mem = nvdimm_provider_data(nvdimm);
2140 if (!nfit_mem || !nfit_mem->bdw) {
2141 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
2142 spa->range_index, nvdimm_name(nvdimm));
2143 } else {
44c462eb
DW
2144 mapping->size = nfit_mem->bdw->capacity;
2145 mapping->start = nfit_mem->bdw->start_address;
5212e11f 2146 ndr_desc->num_lanes = nfit_mem->bdw->windows;
1f7df6f8
DW
2147 blk_valid = 1;
2148 }
2149
44c462eb 2150 ndr_desc->mapping = mapping;
1f7df6f8 2151 ndr_desc->num_mappings = blk_valid;
047fc8a1
RZ
2152 ndbr_desc = to_blk_region_desc(ndr_desc);
2153 ndbr_desc->enable = acpi_nfit_blk_region_enable;
6bc75619 2154 ndbr_desc->do_io = acpi_desc->blk_do_io;
1cf03c00
DW
2155 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
2156 ndr_desc);
2157 if (!nfit_spa->nd_region)
1f7df6f8
DW
2158 return -ENOMEM;
2159 break;
2160 }
2161
2162 return 0;
2163}
2164
c2f32acd
LCY
2165static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2166{
2167 return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2168 nfit_spa_type(spa) == NFIT_SPA_VCD ||
2169 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2170 nfit_spa_type(spa) == NFIT_SPA_PCD);
2171}
2172
1f7df6f8
DW
2173static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2174 struct nfit_spa *nfit_spa)
2175{
44c462eb 2176 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
1f7df6f8 2177 struct acpi_nfit_system_address *spa = nfit_spa->spa;
047fc8a1
RZ
2178 struct nd_blk_region_desc ndbr_desc;
2179 struct nd_region_desc *ndr_desc;
1f7df6f8 2180 struct nfit_memdev *nfit_memdev;
1f7df6f8
DW
2181 struct nvdimm_bus *nvdimm_bus;
2182 struct resource res;
eaf96153 2183 int count = 0, rc;
1f7df6f8 2184
1cf03c00 2185 if (nfit_spa->nd_region)
20985164
VV
2186 return 0;
2187
c2f32acd 2188 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
1f7df6f8
DW
2189 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
2190 __func__);
2191 return 0;
2192 }
2193
2194 memset(&res, 0, sizeof(res));
44c462eb 2195 memset(&mappings, 0, sizeof(mappings));
047fc8a1 2196 memset(&ndbr_desc, 0, sizeof(ndbr_desc));
1f7df6f8
DW
2197 res.start = spa->address;
2198 res.end = res.start + spa->length - 1;
047fc8a1
RZ
2199 ndr_desc = &ndbr_desc.ndr_desc;
2200 ndr_desc->res = &res;
2201 ndr_desc->provider_data = nfit_spa;
2202 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
41d7a6d6
TK
2203 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
2204 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
2205 spa->proximity_domain);
2206 else
2207 ndr_desc->numa_node = NUMA_NO_NODE;
2208
1f7df6f8
DW
2209 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2210 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
44c462eb 2211 struct nd_mapping_desc *mapping;
1f7df6f8
DW
2212
2213 if (memdev->range_index != spa->range_index)
2214 continue;
2215 if (count >= ND_MAX_MAPPINGS) {
2216 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
2217 spa->range_index, ND_MAX_MAPPINGS);
2218 return -ENXIO;
2219 }
44c462eb
DW
2220 mapping = &mappings[count++];
2221 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
1cf03c00 2222 memdev, nfit_spa);
1f7df6f8 2223 if (rc)
1cf03c00 2224 goto out;
1f7df6f8
DW
2225 }
2226
44c462eb 2227 ndr_desc->mapping = mappings;
047fc8a1
RZ
2228 ndr_desc->num_mappings = count;
2229 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
eaf96153 2230 if (rc)
1cf03c00 2231 goto out;
eaf96153 2232
1f7df6f8
DW
2233 nvdimm_bus = acpi_desc->nvdimm_bus;
2234 if (nfit_spa_type(spa) == NFIT_SPA_PM) {
af1996ef 2235 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
48901165 2236 if (rc) {
af1996ef
TK
2237 dev_warn(acpi_desc->dev,
2238 "failed to insert pmem resource to iomem: %d\n",
2239 rc);
48901165 2240 goto out;
0caeef63 2241 }
48901165 2242
1cf03c00
DW
2243 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2244 ndr_desc);
2245 if (!nfit_spa->nd_region)
2246 rc = -ENOMEM;
1f7df6f8 2247 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
1cf03c00
DW
2248 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
2249 ndr_desc);
2250 if (!nfit_spa->nd_region)
2251 rc = -ENOMEM;
c2f32acd
LCY
2252 } else if (nfit_spa_is_virtual(spa)) {
2253 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2254 ndr_desc);
2255 if (!nfit_spa->nd_region)
2256 rc = -ENOMEM;
1f7df6f8 2257 }
20985164 2258
1cf03c00
DW
2259 out:
2260 if (rc)
2261 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
2262 nfit_spa->spa->range_index);
2263 return rc;
2264}
2265
2266static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
2267 u32 max_ars)
2268{
2269 struct device *dev = acpi_desc->dev;
2270 struct nd_cmd_ars_status *ars_status;
2271
2272 if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
2273 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
2274 return 0;
2275 }
2276
2277 if (acpi_desc->ars_status)
2278 devm_kfree(dev, acpi_desc->ars_status);
2279 acpi_desc->ars_status = NULL;
2280 ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
2281 if (!ars_status)
2282 return -ENOMEM;
2283 acpi_desc->ars_status = ars_status;
2284 acpi_desc->ars_status_size = max_ars;
1f7df6f8
DW
2285 return 0;
2286}
2287
1cf03c00
DW
2288static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
2289 struct nfit_spa *nfit_spa)
2290{
2291 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2292 int rc;
2293
2294 if (!nfit_spa->max_ars) {
2295 struct nd_cmd_ars_cap ars_cap;
2296
2297 memset(&ars_cap, 0, sizeof(ars_cap));
2298 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
2299 if (rc < 0)
2300 return rc;
2301 nfit_spa->max_ars = ars_cap.max_ars_out;
2302 nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
2303 /* check that the supported scrub types match the spa type */
2304 if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
2305 ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
2306 return -ENOTTY;
2307 else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
2308 ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
2309 return -ENOTTY;
2310 }
2311
2312 if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
2313 return -ENOMEM;
2314
2315 rc = ars_get_status(acpi_desc);
2316 if (rc < 0 && rc != -ENOSPC)
2317 return rc;
2318
82aa37cf 2319 if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
1cf03c00
DW
2320 return -ENOMEM;
2321
2322 return 0;
2323}
2324
2325static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
2326 struct nfit_spa *nfit_spa)
2327{
2328 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2329 unsigned int overflow_retry = scrub_overflow_abort;
2330 u64 init_ars_start = 0, init_ars_len = 0;
2331 struct device *dev = acpi_desc->dev;
2332 unsigned int tmo = scrub_timeout;
2333 int rc;
2334
37b137ff 2335 if (!nfit_spa->ars_required || !nfit_spa->nd_region)
1cf03c00
DW
2336 return;
2337
2338 rc = ars_start(acpi_desc, nfit_spa);
2339 /*
2340 * If we timed out the initial scan we'll still be busy here,
2341 * and will wait another timeout before giving up permanently.
2342 */
2343 if (rc < 0 && rc != -EBUSY)
2344 return;
2345
2346 do {
2347 u64 ars_start, ars_len;
2348
2349 if (acpi_desc->cancel)
2350 break;
2351 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2352 if (rc == -ENOTTY)
2353 break;
2354 if (rc == -EBUSY && !tmo) {
2355 dev_warn(dev, "range %d ars timeout, aborting\n",
2356 spa->range_index);
2357 break;
2358 }
2359
2360 if (rc == -EBUSY) {
2361 /*
2362 * Note, entries may be appended to the list
2363 * while the lock is dropped, but the workqueue
2364 * being active prevents entries being deleted /
2365 * freed.
2366 */
2367 mutex_unlock(&acpi_desc->init_mutex);
2368 ssleep(1);
2369 tmo--;
2370 mutex_lock(&acpi_desc->init_mutex);
2371 continue;
2372 }
2373
2374 /* we got some results, but there are more pending... */
2375 if (rc == -ENOSPC && overflow_retry--) {
2376 if (!init_ars_len) {
2377 init_ars_len = acpi_desc->ars_status->length;
2378 init_ars_start = acpi_desc->ars_status->address;
2379 }
2380 rc = ars_continue(acpi_desc);
2381 }
2382
2383 if (rc < 0) {
2384 dev_warn(dev, "range %d ars continuation failed\n",
2385 spa->range_index);
2386 break;
2387 }
2388
2389 if (init_ars_len) {
2390 ars_start = init_ars_start;
2391 ars_len = init_ars_len;
2392 } else {
2393 ars_start = acpi_desc->ars_status->address;
2394 ars_len = acpi_desc->ars_status->length;
2395 }
2396 dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
2397 spa->range_index, ars_start, ars_len);
2398 /* notify the region about new poison entries */
2399 nvdimm_region_notify(nfit_spa->nd_region,
2400 NVDIMM_REVALIDATE_POISON);
2401 break;
2402 } while (1);
2403}
2404
2405static void acpi_nfit_scrub(struct work_struct *work)
1f7df6f8 2406{
1cf03c00
DW
2407 struct device *dev;
2408 u64 init_scrub_length = 0;
1f7df6f8 2409 struct nfit_spa *nfit_spa;
1cf03c00
DW
2410 u64 init_scrub_address = 0;
2411 bool init_ars_done = false;
2412 struct acpi_nfit_desc *acpi_desc;
2413 unsigned int tmo = scrub_timeout;
2414 unsigned int overflow_retry = scrub_overflow_abort;
2415
2416 acpi_desc = container_of(work, typeof(*acpi_desc), work);
2417 dev = acpi_desc->dev;
1f7df6f8 2418
1cf03c00
DW
2419 /*
2420 * We scrub in 2 phases. The first phase waits for any platform
2421 * firmware initiated scrubs to complete and then we go search for the
2422 * affected spa regions to mark them scanned. In the second phase we
2423 * initiate a directed scrub for every range that was not scrubbed in
37b137ff
VV
2424 * phase 1. If we're called for a 'rescan', we harmlessly pass through
2425 * the first phase, but really only care about running phase 2, where
2426 * regions can be notified of new poison.
1cf03c00
DW
2427 */
2428
2429 /* process platform firmware initiated scrubs */
2430 retry:
2431 mutex_lock(&acpi_desc->init_mutex);
1f7df6f8 2432 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
1cf03c00
DW
2433 struct nd_cmd_ars_status *ars_status;
2434 struct acpi_nfit_system_address *spa;
2435 u64 ars_start, ars_len;
2436 int rc;
1f7df6f8 2437
1cf03c00
DW
2438 if (acpi_desc->cancel)
2439 break;
2440
2441 if (nfit_spa->nd_region)
2442 continue;
2443
2444 if (init_ars_done) {
2445 /*
2446 * No need to re-query, we're now just
2447 * reconciling all the ranges covered by the
2448 * initial scrub
2449 */
2450 rc = 0;
2451 } else
2452 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2453
2454 if (rc == -ENOTTY) {
2455 /* no ars capability, just register spa and move on */
2456 acpi_nfit_register_region(acpi_desc, nfit_spa);
2457 continue;
2458 }
2459
2460 if (rc == -EBUSY && !tmo) {
2461 /* fallthrough to directed scrub in phase 2 */
2462 dev_warn(dev, "timeout awaiting ars results, continuing...\n");
2463 break;
2464 } else if (rc == -EBUSY) {
2465 mutex_unlock(&acpi_desc->init_mutex);
2466 ssleep(1);
2467 tmo--;
2468 goto retry;
2469 }
2470
2471 /* we got some results, but there are more pending... */
2472 if (rc == -ENOSPC && overflow_retry--) {
2473 ars_status = acpi_desc->ars_status;
2474 /*
2475 * Record the original scrub range, so that we
2476 * can recall all the ranges impacted by the
2477 * initial scrub.
2478 */
2479 if (!init_scrub_length) {
2480 init_scrub_length = ars_status->length;
2481 init_scrub_address = ars_status->address;
2482 }
2483 rc = ars_continue(acpi_desc);
2484 if (rc == 0) {
2485 mutex_unlock(&acpi_desc->init_mutex);
2486 goto retry;
2487 }
2488 }
2489
2490 if (rc < 0) {
2491 /*
2492 * Initial scrub failed, we'll give it one more
2493 * try below...
2494 */
2495 break;
2496 }
2497
2498 /* We got some final results, record completed ranges */
2499 ars_status = acpi_desc->ars_status;
2500 if (init_scrub_length) {
2501 ars_start = init_scrub_address;
2502 ars_len = ars_start + init_scrub_length;
2503 } else {
2504 ars_start = ars_status->address;
2505 ars_len = ars_status->length;
2506 }
2507 spa = nfit_spa->spa;
2508
2509 if (!init_ars_done) {
2510 init_ars_done = true;
2511 dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
2512 ars_start, ars_len);
2513 }
2514 if (ars_start <= spa->address && ars_start + ars_len
2515 >= spa->address + spa->length)
2516 acpi_nfit_register_region(acpi_desc, nfit_spa);
1f7df6f8 2517 }
1cf03c00
DW
2518
2519 /*
2520 * For all the ranges not covered by an initial scrub we still
2521 * want to see if there are errors, but it's ok to discover them
2522 * asynchronously.
2523 */
2524 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2525 /*
2526 * Flag all the ranges that still need scrubbing, but
2527 * register them now to make data available.
2528 */
37b137ff
VV
2529 if (!nfit_spa->nd_region) {
2530 nfit_spa->ars_required = 1;
1cf03c00 2531 acpi_nfit_register_region(acpi_desc, nfit_spa);
37b137ff 2532 }
1cf03c00
DW
2533 }
2534
2535 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2536 acpi_nfit_async_scrub(acpi_desc, nfit_spa);
37b137ff
VV
2537 acpi_desc->scrub_count++;
2538 if (acpi_desc->scrub_count_state)
2539 sysfs_notify_dirent(acpi_desc->scrub_count_state);
1cf03c00
DW
2540 mutex_unlock(&acpi_desc->init_mutex);
2541}
2542
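/*
 * Condensed control flow for the function above (a sketch):
 *
 * phase 1, per spa:	acpi_nfit_query_poison()
 *	-ENOTTY		no ARS support: register the region, move on
 *	-EBUSY		platform scrub still running: sleep and retry,
 *			or fall through to phase 2 after scrub_timeout
 *	-ENOSPC		record the initial range, ars_continue()
 *	0		register regions fully covered by that scrub
 * phase 2:		register remaining regions with ars_required
 *			set, then acpi_nfit_async_scrub() issues a
 *			directed ars_start() per range and raises
 *			NVDIMM_REVALIDATE_POISON notifications.
 */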
2543static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
2544{
2545 struct nfit_spa *nfit_spa;
2546 int rc;
2547
2548 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2549 if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
2550 /* BLK regions don't need to wait for ars results */
2551 rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
2552 if (rc)
2553 return rc;
2554 }
2555
2556 queue_work(nfit_wq, &acpi_desc->work);
1f7df6f8
DW
2557 return 0;
2558}
2559
20985164
VV
2560static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
2561 struct nfit_table_prev *prev)
2562{
2563 struct device *dev = acpi_desc->dev;
2564
2565 if (!list_empty(&prev->spas) ||
2566 !list_empty(&prev->memdevs) ||
2567 !list_empty(&prev->dcrs) ||
2568 !list_empty(&prev->bdws) ||
2569 !list_empty(&prev->idts) ||
2570 !list_empty(&prev->flushes)) {
2571 dev_err(dev, "new nfit deletes entries (unsupported)\n");
2572 return -ENXIO;
2573 }
2574 return 0;
2575}
2576
37b137ff
VV
2577static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
2578{
2579 struct device *dev = acpi_desc->dev;
2580 struct kernfs_node *nfit;
2581 struct device *bus_dev;
2582
2583 if (!ars_supported(acpi_desc->nvdimm_bus))
2584 return 0;
2585
2586 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
2587 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
2588 if (!nfit) {
2589 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
2590 return -ENODEV;
2591 }
2592 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
2593 sysfs_put(nfit);
2594 if (!acpi_desc->scrub_count_state) {
2595 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
2596 return -ENODEV;
2597 }
2598
2599 return 0;
2600}
2601
58cd71b4
DW
2602static void acpi_nfit_destruct(void *data)
2603{
2604 struct acpi_nfit_desc *acpi_desc = data;
37b137ff 2605 struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
58cd71b4 2606
6839a6d9
VV
2607 /*
2608 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
2609 * race teardown
2610 */
2611 mutex_lock(&acpi_desc_lock);
58cd71b4 2612 acpi_desc->cancel = 1;
37b137ff
VV
2613 /*
2614 * Bounce the nvdimm bus lock to make sure any in-flight
2615 * acpi_nfit_ars_rescan() submissions have had a chance to
2616 * either submit or see ->cancel set.
2617 */
2618 device_lock(bus_dev);
2619 device_unlock(bus_dev);
2620
58cd71b4 2621 flush_workqueue(nfit_wq);
37b137ff
VV
2622 if (acpi_desc->scrub_count_state)
2623 sysfs_put(acpi_desc->scrub_count_state);
58cd71b4
DW
2624 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
2625 acpi_desc->nvdimm_bus = NULL;
6839a6d9
VV
2626 list_del(&acpi_desc->list);
2627 mutex_unlock(&acpi_desc_lock);
58cd71b4
DW
2628}
2629
e7a11b44 2630int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
b94d5230
DW
2631{
2632 struct device *dev = acpi_desc->dev;
20985164 2633 struct nfit_table_prev prev;
b94d5230 2634 const void *end;
1f7df6f8 2635 int rc;
b94d5230 2636
58cd71b4 2637 if (!acpi_desc->nvdimm_bus) {
37b137ff
VV
2638 acpi_nfit_init_dsms(acpi_desc);
2639
58cd71b4
DW
2640 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
2641 &acpi_desc->nd_desc);
2642 if (!acpi_desc->nvdimm_bus)
2643 return -ENOMEM;
37b137ff 2644
58cd71b4
DW
2645 rc = devm_add_action_or_reset(dev, acpi_nfit_destruct,
2646 acpi_desc);
2647 if (rc)
2648 return rc;
37b137ff
VV
2649
2650 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
2651 if (rc)
2652 return rc;
6839a6d9
VV
2653
2654 /* register this acpi_desc for mce notifications */
2655 mutex_lock(&acpi_desc_lock);
2656 list_add_tail(&acpi_desc->list, &acpi_descs);
2657 mutex_unlock(&acpi_desc_lock);
58cd71b4
DW
2658 }
2659
20985164
VV
2660 mutex_lock(&acpi_desc->init_mutex);
2661
2662 INIT_LIST_HEAD(&prev.spas);
2663 INIT_LIST_HEAD(&prev.memdevs);
2664 INIT_LIST_HEAD(&prev.dcrs);
2665 INIT_LIST_HEAD(&prev.bdws);
2666 INIT_LIST_HEAD(&prev.idts);
2667 INIT_LIST_HEAD(&prev.flushes);
2668
2669 list_cut_position(&prev.spas, &acpi_desc->spas,
2670 acpi_desc->spas.prev);
2671 list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
2672 acpi_desc->memdevs.prev);
2673 list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
2674 acpi_desc->dcrs.prev);
2675 list_cut_position(&prev.bdws, &acpi_desc->bdws,
2676 acpi_desc->bdws.prev);
2677 list_cut_position(&prev.idts, &acpi_desc->idts,
2678 acpi_desc->idts.prev);
2679 list_cut_position(&prev.flushes, &acpi_desc->flushes,
2680 acpi_desc->flushes.prev);
b94d5230 2681
b94d5230 2682 end = data + sz;
b94d5230 2683 while (!IS_ERR_OR_NULL(data))
20985164 2684 data = add_table(acpi_desc, &prev, data, end);
b94d5230
DW
2685
2686 if (IS_ERR(data)) {
2687 dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
2688 PTR_ERR(data));
20985164
VV
2689 rc = PTR_ERR(data);
2690 goto out_unlock;
b94d5230
DW
2691 }
2692
20985164
VV
2693 rc = acpi_nfit_check_deletions(acpi_desc, &prev);
2694 if (rc)
2695 goto out_unlock;
2696
81ed4e36
DW
2697 rc = nfit_mem_init(acpi_desc);
2698 if (rc)
20985164 2699 goto out_unlock;
62232e45 2700
1f7df6f8
DW
2701 rc = acpi_nfit_register_dimms(acpi_desc);
2702 if (rc)
20985164
VV
2703 goto out_unlock;
2704
2705 rc = acpi_nfit_register_regions(acpi_desc);
1f7df6f8 2706
20985164
VV
2707 out_unlock:
2708 mutex_unlock(&acpi_desc->init_mutex);
2709 return rc;
b94d5230 2710}
6bc75619 2711EXPORT_SYMBOL_GPL(acpi_nfit_init);
b94d5230 2712
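/*
 * Re-entrancy note, summarizing the function above: acpi_nfit_init()
 * runs at probe time and again on _FIT notifications.  The prev lists
 * snapshot the tables parsed last time so add_table() can match them
 * against the new NFIT, and acpi_nfit_check_deletions() fails the
 * update if a previously seen table has disappeared, since deleting
 * entries is unsupported.
 */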
7ae0fa43
DW
2713struct acpi_nfit_flush_work {
2714 struct work_struct work;
2715 struct completion cmp;
2716};
2717
2718static void flush_probe(struct work_struct *work)
2719{
2720 struct acpi_nfit_flush_work *flush;
2721
2722 flush = container_of(work, typeof(*flush), work);
2723 complete(&flush->cmp);
2724}
2725
2726static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
2727{
2728 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2729 struct device *dev = acpi_desc->dev;
2730 struct acpi_nfit_flush_work flush;
e471486c 2731 int rc;
7ae0fa43
DW
2732
2733 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
2734 device_lock(dev);
2735 device_unlock(dev);
2736
2737 /*
2738 * Scrub work could take 10s of seconds, userspace may give up so we
2739 * need to be interruptible while waiting.
2740 */
2741 INIT_WORK_ONSTACK(&flush.work, flush_probe);
2742	init_completion(&flush.cmp);
2743 queue_work(nfit_wq, &flush.work);
e471486c
DW
2744
2745 rc = wait_for_completion_interruptible(&flush.cmp);
2746 cancel_work_sync(&flush.work);
2747 return rc;
7ae0fa43
DW
2748}
2749
87bf572e
DW
2750static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
2751 struct nvdimm *nvdimm, unsigned int cmd)
2752{
2753 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2754
2755 if (nvdimm)
2756 return 0;
2757 if (cmd != ND_CMD_ARS_START)
2758 return 0;
2759
2760 /*
2761 * The kernel and userspace may race to initiate a scrub, but
2762 * the scrub thread is prepared to lose that initial race. It
2763 * just needs guarantees that any ars it initiates are not
2764	 * interrupted by any intervening start requests from userspace.
2765 */
2766 if (work_busy(&acpi_desc->work))
2767 return -EBUSY;
2768
2769 return 0;
2770}
2771
6839a6d9 2772int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
37b137ff
VV
2773{
2774 struct device *dev = acpi_desc->dev;
2775 struct nfit_spa *nfit_spa;
2776
2777 if (work_busy(&acpi_desc->work))
2778 return -EBUSY;
2779
2780 if (acpi_desc->cancel)
2781 return 0;
2782
2783 mutex_lock(&acpi_desc->init_mutex);
2784 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2785 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2786
2787 if (nfit_spa_type(spa) != NFIT_SPA_PM)
2788 continue;
2789
2790 nfit_spa->ars_required = 1;
2791 }
2792 queue_work(nfit_wq, &acpi_desc->work);
2793 dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
2794 mutex_unlock(&acpi_desc->init_mutex);
2795
2796 return 0;
2797}
2798
a61fe6f7 2799void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
b94d5230
DW
2800{
2801 struct nvdimm_bus_descriptor *nd_desc;
b94d5230
DW
2802
2803 dev_set_drvdata(dev, acpi_desc);
2804 acpi_desc->dev = dev;
6bc75619 2805 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
b94d5230
DW
2806 nd_desc = &acpi_desc->nd_desc;
2807 nd_desc->provider_name = "ACPI.NFIT";
bc9775d8 2808 nd_desc->module = THIS_MODULE;
b94d5230 2809 nd_desc->ndctl = acpi_nfit_ctl;
7ae0fa43 2810 nd_desc->flush_probe = acpi_nfit_flush_probe;
87bf572e 2811 nd_desc->clear_to_send = acpi_nfit_clear_to_send;
45def22c 2812 nd_desc->attr_groups = acpi_nfit_attribute_groups;
b94d5230 2813
20985164
VV
2814 INIT_LIST_HEAD(&acpi_desc->spas);
2815 INIT_LIST_HEAD(&acpi_desc->dcrs);
2816 INIT_LIST_HEAD(&acpi_desc->bdws);
2817 INIT_LIST_HEAD(&acpi_desc->idts);
2818 INIT_LIST_HEAD(&acpi_desc->flushes);
2819 INIT_LIST_HEAD(&acpi_desc->memdevs);
2820 INIT_LIST_HEAD(&acpi_desc->dimms);
6839a6d9 2821 INIT_LIST_HEAD(&acpi_desc->list);
20985164 2822 mutex_init(&acpi_desc->init_mutex);
1cf03c00 2823 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
20985164 2824}
a61fe6f7 2825EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
20985164
VV
2826
2827static int acpi_nfit_add(struct acpi_device *adev)
2828{
2829 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
2830 struct acpi_nfit_desc *acpi_desc;
2831 struct device *dev = &adev->dev;
2832 struct acpi_table_header *tbl;
2833 acpi_status status = AE_OK;
2834 acpi_size sz;
31932041 2835 int rc = 0;
20985164 2836
6b11d1d6 2837 status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
20985164
VV
2838 if (ACPI_FAILURE(status)) {
2839 /* This is ok, we could have an nvdimm hotplugged later */
2840 dev_dbg(dev, "failed to find NFIT at startup\n");
2841 return 0;
2842 }
6b11d1d6 2843 sz = tbl->length;
20985164 2844
a61fe6f7
DW
2845 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
2846 if (!acpi_desc)
2847 return -ENOMEM;
2848 acpi_nfit_desc_init(acpi_desc, &adev->dev);
20985164 2849
e7a11b44 2850 /* Save the acpi header for exporting the revision via sysfs */
6b577c9d 2851 acpi_desc->acpi_header = *tbl;
20985164
VV
2852
2853 /* Evaluate _FIT and override with that if present */
2854 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
2855 if (ACPI_SUCCESS(status) && buf.length > 0) {
e7a11b44
DW
2856 union acpi_object *obj = buf.pointer;
2857
2858 if (obj->type == ACPI_TYPE_BUFFER)
2859 rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
2860 obj->buffer.length);
2861 else
6b577c9d
LK
2862 dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
2863 __func__, (int) obj->type);
31932041
DW
2864 kfree(buf.pointer);
2865 } else
e7a11b44
DW
2866 /* skip over the lead-in header table */
2867 rc = acpi_nfit_init(acpi_desc, (void *) tbl
2868 + sizeof(struct acpi_table_nfit),
2869 sz - sizeof(struct acpi_table_nfit));
e7a11b44 2870 return rc;
b94d5230
DW
2871}
2872
2873static int acpi_nfit_remove(struct acpi_device *adev)
2874{
58cd71b4 2875 /* see acpi_nfit_destruct */
b94d5230
DW
2876 return 0;
2877}
2878
c14a868a 2879void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
20985164 2880{
c14a868a 2881 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
20985164 2882 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
e7a11b44 2883 union acpi_object *obj;
20985164
VV
2884 acpi_status status;
2885 int ret;
2886
2887 dev_dbg(dev, "%s: event: %d\n", __func__, event);
2888
c09f1218
VV
2889 if (event != NFIT_NOTIFY_UPDATE)
2890 return;
2891
20985164
VV
2892 if (!dev->driver) {
2893 /* dev->driver may be null if we're being removed */
2894 dev_dbg(dev, "%s: no driver found for dev\n", __func__);
c14a868a 2895 return;
20985164
VV
2896 }
2897
2898 if (!acpi_desc) {
a61fe6f7
DW
2899 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
2900 if (!acpi_desc)
c14a868a
DW
2901 return;
2902 acpi_nfit_desc_init(acpi_desc, dev);
7ae0fa43
DW
2903 } else {
2904 /*
2905 * Finish previous registration before considering new
2906 * regions.
2907 */
2908 flush_workqueue(nfit_wq);
20985164
VV
2909 }
2910
2911 /* Evaluate _FIT */
c14a868a 2912 status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
20985164
VV
2913 if (ACPI_FAILURE(status)) {
2914 dev_err(dev, "failed to evaluate _FIT\n");
c14a868a 2915 return;
20985164
VV
2916 }
2917
6b577c9d
LK
2918 obj = buf.pointer;
2919 if (obj->type == ACPI_TYPE_BUFFER) {
e7a11b44
DW
2920 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
2921 obj->buffer.length);
31932041 2922 if (ret)
6b577c9d 2923 dev_err(dev, "failed to merge updated NFIT\n");
31932041 2924 } else
6b577c9d 2925 dev_err(dev, "Invalid _FIT\n");
20985164 2926 kfree(buf.pointer);
c14a868a
DW
2927}
2928EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
20985164 2929
c14a868a
DW
2930static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
2931{
2932 device_lock(&adev->dev);
2933 __acpi_nfit_notify(&adev->dev, adev->handle, event);
2934 device_unlock(&adev->dev);
20985164
VV
2935}
2936
b94d5230
DW
2937static const struct acpi_device_id acpi_nfit_ids[] = {
2938 { "ACPI0012", 0 },
2939 { "", 0 },
2940};
2941MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
2942
2943static struct acpi_driver acpi_nfit_driver = {
2944 .name = KBUILD_MODNAME,
2945 .ids = acpi_nfit_ids,
2946 .ops = {
2947 .add = acpi_nfit_add,
2948 .remove = acpi_nfit_remove,
20985164 2949 .notify = acpi_nfit_notify,
b94d5230
DW
2950 },
2951};
2952
2953static __init int nfit_init(void)
2954{
2955 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
2956 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
2957 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
2958 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
2959 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
2960 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
2961 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
2962
2963 acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
2964 acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
2965 acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
2966 acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
2967 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
2968 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
2969 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
2970 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
2971 acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
2972 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
31eca76b
DW
2973 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
2974 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
e02fb726 2975 acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
b94d5230 2976
7ae0fa43
DW
2977 nfit_wq = create_singlethread_workqueue("nfit");
2978 if (!nfit_wq)
2979 return -ENOMEM;
2980
6839a6d9
VV
2981 nfit_mce_register();
2982
b94d5230
DW
2983 return acpi_bus_register_driver(&acpi_nfit_driver);
2984}
2985
2986static __exit void nfit_exit(void)
2987{
6839a6d9 2988 nfit_mce_unregister();
b94d5230 2989 acpi_bus_unregister_driver(&acpi_nfit_driver);
7ae0fa43 2990 destroy_workqueue(nfit_wq);
6839a6d9 2991 WARN_ON(!list_empty(&acpi_descs));
b94d5230
DW
2992}
2993
2994module_init(nfit_init);
2995module_exit(nfit_exit);
2996MODULE_LICENSE("GPL v2");
2997MODULE_AUTHOR("Intel Corporation");