/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

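/*
 * Being a bool with S_IRUGO|S_IWUSR, the parameter above can be flipped at
 * runtime; assuming the usual module naming it is reachable on the kernel
 * command line as "nfit.force_enable_dimms=Y" or through
 * /sys/module/nfit/parameters/force_enable_dimms.
 */
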
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

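/*
 * acpi_nfit_ctl() is the common command backend for both bus-scoped and
 * dimm-scoped commands: it wraps the caller's flat buffer in a
 * single-element ACPI package, sizes the input payload from the libnvdimm
 * command descriptor, evaluates _DSM against the bus or dimm handle, and
 * copies the result back out field-by-field with underflow and overrun
 * checks along the way.
 */
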
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

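/*
 * nfit_mem_add() assembles one nfit_mem from the parsed tables: the DCR is
 * found by region_index, then optionally a BDW with the same region_index,
 * the SPA-BDW range that maps it, the matching MEMDEV and interleave
 * table, and finally a flush-hint table keyed by device_handle.
 */
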
static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_dcr *nfit_dcr;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
		if (nfit_dcr->dcr->region_index != dcr)
			continue;
		nfit_mem->dcr = nfit_dcr->dcr;
		break;
	}

	if (!nfit_mem->dcr) {
		dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
				spa->range_index, __to_nfit_memdev(nfit_mem)
				? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
		return -ENODEV;
	}

	/*
	 * We've found enough to create an nvdimm, optionally
	 * find an associated BDW
	 */
	list_add(&nfit_mem->list, &acpi_desc->dimms);

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return 0;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return 0;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}

	return 0;
}

static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);
	u16 dcr;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		int rc;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}

		if (found)
			continue;

		rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
		if (rc)
			return rc;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups);

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	int i;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

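/*
 * The interleave-set cookie computed below is a fletcher64 checksum over
 * the (region_offset, serial_number) pairs of every mapping in the set,
 * sorted by region_offset so the result is independent of enumeration
 * order.  libnvdimm compares this cookie against the one recorded in
 * namespace labels to detect a reshuffled dimm population.
 */
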
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}

static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

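/*
 * Worked example for to_interleave_offset() above, with illustrative
 * numbers: line_size = 256, num_lines = 2 and offset = 0x500 give
 * line_no = 5, sub_line_offset = 0, then line_index = 1 and
 * table_skip_count = 2, so the translated offset is
 * base_offset + idt->line_offset[1] * 256 + 2 * table_size + 0.
 */
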
static void wmb_blk(struct nfit_blk *nfit_blk)
{
	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush.  Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}

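/*
 * The control write below packs a 64-bit "block command word": bits 0-47
 * carry the dpa in cache-line units, bits 48-55 the transfer length in
 * cache lines, and bit 56 the read(0)/write(1) flag, per the BCW_* masks
 * and shifts.
 */
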
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}

static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}

static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);

	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}

static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
							ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}

/**
 * nfit_spa_map() - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @acpi_desc: NFIT-bus descriptor that provided the spa table entry
 * @spa: spa table to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range.  In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

*nd_desc
,
1325 struct nvdimm
*nvdimm
, struct nfit_blk
*nfit_blk
)
1327 struct nd_cmd_dimm_flags flags
;
1330 memset(&flags
, 0, sizeof(flags
));
1331 rc
= nd_desc
->ndctl(nd_desc
, nvdimm
, ND_CMD_DIMM_FLAGS
, &flags
,
1334 if (rc
>= 0 && flags
.status
== 0)
1335 nfit_blk
->dimm_flags
= flags
.flags
;
1336 else if (rc
== -ENOTTY
) {
1337 /* fall back to a conservative default */
1338 nfit_blk
->dimm_flags
= ND_BLK_DCR_LATCH
| ND_BLK_READ_FLUSH
;
1346 static int acpi_nfit_blk_region_enable(struct nvdimm_bus
*nvdimm_bus
,
1349 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1350 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1351 struct nd_blk_region
*ndbr
= to_nd_blk_region(dev
);
1352 struct nfit_flush
*nfit_flush
;
1353 struct nfit_blk_mmio
*mmio
;
1354 struct nfit_blk
*nfit_blk
;
1355 struct nfit_mem
*nfit_mem
;
1356 struct nvdimm
*nvdimm
;
1359 nvdimm
= nd_blk_region_to_dimm(ndbr
);
1360 nfit_mem
= nvdimm_provider_data(nvdimm
);
1361 if (!nfit_mem
|| !nfit_mem
->dcr
|| !nfit_mem
->bdw
) {
1362 dev_dbg(dev
, "%s: missing%s%s%s\n", __func__
,
1363 nfit_mem
? "" : " nfit_mem",
1364 (nfit_mem
&& nfit_mem
->dcr
) ? "" : " dcr",
1365 (nfit_mem
&& nfit_mem
->bdw
) ? "" : " bdw");
1369 nfit_blk
= devm_kzalloc(dev
, sizeof(*nfit_blk
), GFP_KERNEL
);
1372 nd_blk_region_set_provider_data(ndbr
, nfit_blk
);
1373 nfit_blk
->nd_region
= to_nd_region(dev
);
1375 /* map block aperture memory */
1376 nfit_blk
->bdw_offset
= nfit_mem
->bdw
->offset
;
1377 mmio
= &nfit_blk
->mmio
[BDW
];
1378 mmio
->addr
.base
= nfit_spa_map(acpi_desc
, nfit_mem
->spa_bdw
,
1380 if (!mmio
->addr
.base
) {
1381 dev_dbg(dev
, "%s: %s failed to map bdw\n", __func__
,
1382 nvdimm_name(nvdimm
));
1385 mmio
->size
= nfit_mem
->bdw
->size
;
1386 mmio
->base_offset
= nfit_mem
->memdev_bdw
->region_offset
;
1387 mmio
->idt
= nfit_mem
->idt_bdw
;
1388 mmio
->spa
= nfit_mem
->spa_bdw
;
1389 rc
= nfit_blk_init_interleave(mmio
, nfit_mem
->idt_bdw
,
1390 nfit_mem
->memdev_bdw
->interleave_ways
);
1392 dev_dbg(dev
, "%s: %s failed to init bdw interleave\n",
1393 __func__
, nvdimm_name(nvdimm
));
1397 /* map block control memory */
1398 nfit_blk
->cmd_offset
= nfit_mem
->dcr
->command_offset
;
1399 nfit_blk
->stat_offset
= nfit_mem
->dcr
->status_offset
;
1400 mmio
= &nfit_blk
->mmio
[DCR
];
1401 mmio
->addr
.base
= nfit_spa_map(acpi_desc
, nfit_mem
->spa_dcr
,
1403 if (!mmio
->addr
.base
) {
1404 dev_dbg(dev
, "%s: %s failed to map dcr\n", __func__
,
1405 nvdimm_name(nvdimm
));
1408 mmio
->size
= nfit_mem
->dcr
->window_size
;
1409 mmio
->base_offset
= nfit_mem
->memdev_dcr
->region_offset
;
1410 mmio
->idt
= nfit_mem
->idt_dcr
;
1411 mmio
->spa
= nfit_mem
->spa_dcr
;
1412 rc
= nfit_blk_init_interleave(mmio
, nfit_mem
->idt_dcr
,
1413 nfit_mem
->memdev_dcr
->interleave_ways
);
1415 dev_dbg(dev
, "%s: %s failed to init dcr interleave\n",
1416 __func__
, nvdimm_name(nvdimm
));
1420 rc
= acpi_nfit_blk_get_flags(nd_desc
, nvdimm
, nfit_blk
);
1422 dev_dbg(dev
, "%s: %s failed get DIMM flags\n",
1423 __func__
, nvdimm_name(nvdimm
));
1427 nfit_flush
= nfit_mem
->nfit_flush
;
1428 if (nfit_flush
&& nfit_flush
->flush
->hint_count
!= 0) {
1429 nfit_blk
->nvdimm_flush
= devm_ioremap_nocache(dev
,
1430 nfit_flush
->flush
->hint_address
[0], 8);
1431 if (!nfit_blk
->nvdimm_flush
)
1435 if (!arch_has_wmb_pmem() && !nfit_blk
->nvdimm_flush
)
1436 dev_warn(dev
, "unable to guarantee persistence of writes\n");
1438 if (mmio
->line_size
== 0)
1441 if ((u32
) nfit_blk
->cmd_offset
% mmio
->line_size
1442 + 8 > mmio
->line_size
) {
1443 dev_dbg(dev
, "cmd_offset crosses interleave boundary\n");
1445 } else if ((u32
) nfit_blk
->stat_offset
% mmio
->line_size
1446 + 8 > mmio
->line_size
) {
1447 dev_dbg(dev
, "stat_offset crosses interleave boundary\n");
static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */

	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->addr.base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}

static int ars_get_cap(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_cap *cmd, u64 addr, u64 length)
{
	cmd->address = addr;
	cmd->length = length;

	return nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd));
}

static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_start *cmd, u64 addr, u64 length)
{
	int rc;

	cmd->address = addr;
	cmd->length = length;
	cmd->type = ND_ARS_PERSISTENT;

	while (1) {
		rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, cmd,
				sizeof(*cmd));
		if (rc)
			return rc;
		switch (cmd->status) {
		case 0:
			return 0;
		case 1:
			/* ARS unsupported, but we should never get here */
			return 0;
		case 2:
			return -EINVAL;
		case 3:
			/* ARS is in progress */
			msleep(1000);
			break;
		default:
			return -ENXIO;
		}
	}
}

static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_status *cmd)
{
	int rc;

	while (1) {
		rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
			sizeof(*cmd));
		if (rc || cmd->status & 0xffff)
			return -ENXIO;

		/* Check extended status (Upper two bytes) */
		switch (cmd->status >> 16) {
		case 0:
			return 0;
		case 1:
			/* ARS is in progress */
			msleep(1000);
			break;
		case 2:
			/* No ARS performed for the current boot */
			return 0;
		default:
			return -ENXIO;
		}
	}
}

static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
		struct nd_cmd_ars_status *ars_status, u64 start)
{
	int rc;
	u32 i;

	/*
	 * The address field returned by ars_status should be either
	 * less than or equal to the address we last started ARS for.
	 * The (start, length) returned by ars_status should also have
	 * non-zero overlap with the range we started ARS for.
	 * If this is not the case, bail.
	 */
	if (ars_status->address > start ||
			(ars_status->address + ars_status->length < start))
		return -ENXIO;

	for (i = 0; i < ars_status->num_records; i++) {
		rc = nvdimm_bus_add_poison(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}

	return 0;
}

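/*
 * The ARS flow below: query capabilities with ND_CMD_ARS_CAP, skip
 * gracefully when persistent-memory scrub is unsupported, reuse the
 * results of a prior full-range scrub when one already covers this
 * region, and otherwise loop start/status over the range, feeding each
 * batch of records to nvdimm_bus_add_poison() and advancing the cursor
 * whenever the status buffer overflows.
 */
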
static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	struct nd_cmd_ars_status *ars_status = NULL;
	struct nd_cmd_ars_start *ars_start = NULL;
	struct nd_cmd_ars_cap *ars_cap = NULL;
	u64 start, len, cur, remaining;
	int rc;

	ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
	if (!ars_cap)
		return -ENOMEM;

	start = ndr_desc->res->start;
	len = ndr_desc->res->end - ndr_desc->res->start + 1;

	rc = ars_get_cap(nd_desc, ars_cap, start, len);
	if (rc)
		goto out;

	/*
	 * If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in
	 * extended status is not set, skip this but continue initialization
	 */
	if ((ars_cap->status & 0xffff) ||
		!(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
		dev_warn(acpi_desc->dev,
			"ARS unsupported (status: 0x%x), won't create an error list\n",
			ars_cap->status);
		goto out;
	}

	/*
	 * Check if a full-range ARS has been run. If so, use those results
	 * without having to start a new ARS.
	 */
	ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status),
			GFP_KERNEL);
	if (!ars_status) {
		rc = -ENOMEM;
		goto out;
	}

	rc = ars_get_status(nd_desc, ars_status);
	if (rc)
		goto out;

	if (ars_status->address <= start &&
		(ars_status->address + ars_status->length >= start + len)) {
		rc = ars_status_process_records(nvdimm_bus, ars_status, start);
		goto out;
	}

	/*
	 * ARS_STATUS can overflow if the number of poison entries found is
	 * greater than the maximum buffer size (ars_cap->max_ars_out)
	 * To detect overflow, check if the length field of ars_status
	 * is less than the length we supplied. If so, process the
	 * error entries we got, adjust the start point, and start again
	 */
	ars_start = kzalloc(sizeof(*ars_start), GFP_KERNEL);
	if (!ars_start) {
		rc = -ENOMEM;
		goto out;
	}

	cur = start;
	remaining = len;
	do {
		u64 done, end;

		rc = ars_do_start(nd_desc, ars_start, cur, remaining);
		if (rc)
			goto out;

		rc = ars_get_status(nd_desc, ars_status);
		if (rc)
			goto out;

		rc = ars_status_process_records(nvdimm_bus, ars_status, cur);
		if (rc)
			goto out;

		end = min(cur + remaining,
			ars_status->address + ars_status->length);
		done = end - cur;
		cur += done;
		remaining -= done;
	} while (remaining);

 out:
	kfree(ars_cap);
	kfree(ars_start);
	kfree(ars_status);
	return rc;
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct acpi_nfit_system_address *spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc))
			return -ENOMEM;
		break;
	}

	return 0;
}

static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->is_registered)
		return 0;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, spa);
		if (rc)
			return rc;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		return rc;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_find_poison(acpi_desc, ndr_desc);
		if (rc) {
			dev_err(acpi_desc->dev,
				"error while performing ARS to find poison: %d\n",
				rc);
			return rc;
		}
		if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	}

	nfit_spa->is_registered = 1;
	return 0;
}

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc = acpi_nfit_register_region(acpi_desc, nfit_spa);

		if (rc)
			return rc;
	}
	return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}

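/*
 * acpi_nfit_init() supports re-parsing after a _FIT notification: the live
 * table lists are cut over to 'prev', add_table() walks the new NFIT and
 * moves unchanged entries back, and the update is rejected if anything is
 * left behind on 'prev' since table deletion is unsupported.
 */
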
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	u8 *data;
	int rc;

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	if (nfit_mem_init(acpi_desc) != 0) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return ERR_PTR(-ENOMEM);

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
	if (!acpi_desc->nvdimm_bus) {
		devm_kfree(dev, acpi_desc);
		return ERR_PTR(-ENXIO);
	}

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);
	mutex_init(&acpi_desc->init_mutex);

	return acpi_desc;
}

static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = acpi_nfit_desc_init(adev);
	if (IS_ERR(acpi_desc)) {
		dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
				__func__, PTR_ERR(acpi_desc));
		return PTR_ERR(acpi_desc);
	}

	/*
	 * Save the acpi header for later and then skip it,
	 * making nfit point to the first nfit table header.
	 */
	acpi_desc->acpi_header = *tbl;
	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
	sz -= sizeof(struct acpi_table_nfit);

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj;
		/*
		 * Adjust for the acpi_object header of the _FIT
		 */
		obj = buf.pointer;
		if (obj->type == ACPI_TYPE_BUFFER) {
			acpi_desc->nfit =
				(struct acpi_nfit_header *)obj->buffer.pointer;
			sz = obj->buffer.length;
		} else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				__func__, (int) obj->type);
	}

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}

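/*
 * The notify handler implements NVDIMM hot-add: evaluating _FIT returns
 * the full, updated firmware interface table, which is handed back to
 * acpi_nfit_init() to be merged against the tables already parsed.
 */
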
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_header *nfit_saved;
	union acpi_object *obj;
	struct device *dev = &adev->dev;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	device_lock(dev);
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		goto out_unlock;
	}

	if (!acpi_desc) {
		acpi_desc = acpi_nfit_desc_init(adev);
		if (IS_ERR(acpi_desc)) {
			dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
				__func__, PTR_ERR(acpi_desc));
			goto out_unlock;
		}
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		goto out_unlock;
	}

	nfit_saved = acpi_desc->nfit;
	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		acpi_desc->nfit =
			(struct acpi_nfit_header *)obj->buffer.pointer;
		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
		if (ret) {
			/* Merge failed, restore old nfit, and exit */
			acpi_desc->nfit = nfit_saved;
			dev_err(dev, "failed to merge updated NFIT\n");
		}
	} else {
		/* Bad _FIT, restore old nfit */
		dev_err(dev, "Invalid _FIT\n");
	}
	kfree(buf.pointer);

 out_unlock:
	device_unlock(dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

int nfit_init(void)
2074 BUILD_BUG_ON(sizeof(struct acpi_table_nfit
) != 40);
2075 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address
) != 56);
2076 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map
) != 48);
2077 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave
) != 20);
2078 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios
) != 9);
2079 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region
) != 80);
2080 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region
) != 40);
2082 acpi_str_to_uuid(UUID_VOLATILE_MEMORY
, nfit_uuid
[NFIT_SPA_VOLATILE
]);
2083 acpi_str_to_uuid(UUID_PERSISTENT_MEMORY
, nfit_uuid
[NFIT_SPA_PM
]);
2084 acpi_str_to_uuid(UUID_CONTROL_REGION
, nfit_uuid
[NFIT_SPA_DCR
]);
2085 acpi_str_to_uuid(UUID_DATA_REGION
, nfit_uuid
[NFIT_SPA_BDW
]);
2086 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK
, nfit_uuid
[NFIT_SPA_VDISK
]);
2087 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD
, nfit_uuid
[NFIT_SPA_VCD
]);
2088 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK
, nfit_uuid
[NFIT_SPA_PDISK
]);
2089 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD
, nfit_uuid
[NFIT_SPA_PCD
]);
2090 acpi_str_to_uuid(UUID_NFIT_BUS
, nfit_uuid
[NFIT_DEV_BUS
]);
2091 acpi_str_to_uuid(UUID_NFIT_DIMM
, nfit_uuid
[NFIT_DEV_DIMM
]);
2093 return acpi_bus_register_driver(&acpi_nfit_driver
);
2096 static __exit
void nfit_exit(void)
2098 acpi_bus_unregister_driver(&acpi_nfit_driver
);
2101 module_init(nfit_init
);
2102 module_exit(nfit_exit
);
2103 MODULE_LICENSE("GPL v2");
2104 MODULE_AUTHOR("Intel Corporation");