]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/base/memory.c
memory hotplug: Allow memory blocks to span multiple memory sections
[mirror_ubuntu-artful-kernel.git] / drivers / base / memory.c
CommitLineData
3947be19
DH
1/*
2 * drivers/base/memory.c - basic Memory class support
3 *
4 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
5 * Dave Hansen <haveblue@us.ibm.com>
6 *
7 * This file provides the necessary infrastructure to represent
8 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
9 * All arch-independent code that assumes MEMORY_HOTPLUG requires
10 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
11 */
12
13#include <linux/sysdev.h>
14#include <linux/module.h>
15#include <linux/init.h>
3947be19 16#include <linux/topology.h>
c59ede7b 17#include <linux/capability.h>
3947be19
DH
18#include <linux/device.h>
19#include <linux/memory.h>
20#include <linux/kobject.h>
21#include <linux/memory_hotplug.h>
22#include <linux/mm.h>
da19cbcf 23#include <linux/mutex.h>
9f1b16a5 24#include <linux/stat.h>
5a0e3ad6 25#include <linux/slab.h>
9f1b16a5 26
3947be19
DH
27#include <asm/atomic.h>
28#include <asm/uaccess.h>
29
2938ffbd
NF
/* Serializes creation and removal of memory-block sysfs entries. */
static DEFINE_MUTEX(mem_sysfs_mutex);

#define MEMORY_CLASS_NAME	"memory"

/* A memory block covers at least one SPARSEMEM section. */
#define MIN_MEMORY_BLOCK_SIZE	(1 << SECTION_SIZE_BITS)

/* How many sections make up one memory block; set in memory_dev_init(). */
static int sections_per_block;

/* Map a section number to the id of the memory block containing it. */
static inline int base_memory_block_id(int section_nr)
{
	return section_nr / sections_per_block;
}
3947be19
DH
41
42static struct sysdev_class memory_sysdev_class = {
af5ca3f4 43 .name = MEMORY_CLASS_NAME,
3947be19 44};
3947be19 45
312c004d 46static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj)
3947be19
DH
47{
48 return MEMORY_CLASS_NAME;
49}
50
/*
 * No extra environment variables are added to memory uevents;
 * always report success.  (The original routed 0 through a dead
 * local variable for no benefit.)
 */
static int memory_uevent(struct kset *kset, struct kobject *obj,
			 struct kobj_uevent_env *env)
{
	return 0;
}
57
9cd43611 58static const struct kset_uevent_ops memory_uevent_ops = {
312c004d
KS
59 .name = memory_uevent_name,
60 .uevent = memory_uevent,
3947be19
DH
61};
62
e041c683 63static BLOCKING_NOTIFIER_HEAD(memory_chain);
3947be19 64
98a38ebd 65int register_memory_notifier(struct notifier_block *nb)
3947be19 66{
e041c683 67 return blocking_notifier_chain_register(&memory_chain, nb);
3947be19 68}
3c82c30c 69EXPORT_SYMBOL(register_memory_notifier);
3947be19 70
98a38ebd 71void unregister_memory_notifier(struct notifier_block *nb)
3947be19 72{
e041c683 73 blocking_notifier_chain_unregister(&memory_chain, nb);
3947be19 74}
3c82c30c 75EXPORT_SYMBOL(unregister_memory_notifier);
3947be19 76
925cc71e
RJ
77static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);
78
79int register_memory_isolate_notifier(struct notifier_block *nb)
80{
81 return atomic_notifier_chain_register(&memory_isolate_chain, nb);
82}
83EXPORT_SYMBOL(register_memory_isolate_notifier);
84
85void unregister_memory_isolate_notifier(struct notifier_block *nb)
86{
87 atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
88}
89EXPORT_SYMBOL(unregister_memory_isolate_notifier);
90
3947be19
DH
91/*
92 * register_memory - Setup a sysfs device for a memory block
93 */
00a41db5 94static
0c2c99b1 95int register_memory(struct memory_block *memory)
3947be19
DH
96{
97 int error;
98
99 memory->sysdev.cls = &memory_sysdev_class;
0c2c99b1 100 memory->sysdev.id = memory->phys_index / sections_per_block;
3947be19
DH
101
102 error = sysdev_register(&memory->sysdev);
3947be19
DH
103 return error;
104}
105
106static void
0c2c99b1 107unregister_memory(struct memory_block *memory)
3947be19
DH
108{
109 BUG_ON(memory->sysdev.cls != &memory_sysdev_class);
3947be19 110
00a41db5
BP
111 /* drop the ref. we got in remove_memory_block() */
112 kobject_put(&memory->sysdev.kobj);
3947be19 113 sysdev_unregister(&memory->sysdev);
3947be19
DH
114}
115
0c2c99b1
NF
116unsigned long __weak memory_block_size_bytes(void)
117{
118 return MIN_MEMORY_BLOCK_SIZE;
119}
120
121static unsigned long get_memory_block_size(void)
122{
123 unsigned long block_sz;
124
125 block_sz = memory_block_size_bytes();
126
127 /* Validate blk_sz is a power of 2 and not less than section size */
128 if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
129 WARN_ON(1);
130 block_sz = MIN_MEMORY_BLOCK_SIZE;
131 }
132
133 return block_sz;
134}
135
3947be19
DH
136/*
137 * use this as the physical section index that this memsection
138 * uses.
139 */
140
4a0b2b4d
AK
141static ssize_t show_mem_phys_index(struct sys_device *dev,
142 struct sysdev_attribute *attr, char *buf)
3947be19
DH
143{
144 struct memory_block *mem =
145 container_of(dev, struct memory_block, sysdev);
0c2c99b1 146 return sprintf(buf, "%08lx\n", mem->phys_index / sections_per_block);
3947be19
DH
147}
148
5c755e9f
BP
149/*
150 * Show whether the section of memory is likely to be hot-removable
151 */
1f07be1c
SR
152static ssize_t show_mem_removable(struct sys_device *dev,
153 struct sysdev_attribute *attr, char *buf)
5c755e9f 154{
0c2c99b1
NF
155 unsigned long i, pfn;
156 int ret = 1;
5c755e9f
BP
157 struct memory_block *mem =
158 container_of(dev, struct memory_block, sysdev);
159
0c2c99b1
NF
160 for (i = 0; i < sections_per_block; i++) {
161 pfn = section_nr_to_pfn(mem->phys_index + i);
162 ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
163 }
164
5c755e9f
BP
165 return sprintf(buf, "%d\n", ret);
166}
167
3947be19
DH
168/*
169 * online, offline, going offline, etc.
170 */
4a0b2b4d
AK
171static ssize_t show_mem_state(struct sys_device *dev,
172 struct sysdev_attribute *attr, char *buf)
3947be19
DH
173{
174 struct memory_block *mem =
175 container_of(dev, struct memory_block, sysdev);
176 ssize_t len = 0;
177
178 /*
179 * We can probably put these states in a nice little array
180 * so that they're not open-coded
181 */
182 switch (mem->state) {
183 case MEM_ONLINE:
184 len = sprintf(buf, "online\n");
185 break;
186 case MEM_OFFLINE:
187 len = sprintf(buf, "offline\n");
188 break;
189 case MEM_GOING_OFFLINE:
190 len = sprintf(buf, "going-offline\n");
191 break;
192 default:
193 len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
194 mem->state);
195 WARN_ON(1);
196 break;
197 }
198
199 return len;
200}
201
7b78d335 202int memory_notify(unsigned long val, void *v)
3947be19 203{
e041c683 204 return blocking_notifier_call_chain(&memory_chain, val, v);
3947be19
DH
205}
206
925cc71e
RJ
207int memory_isolate_notify(unsigned long val, void *v)
208{
209 return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
210}
211
3947be19
DH
212/*
213 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
214 * OK to have direct references to sparsemem variables in here.
215 */
216static int
0c2c99b1 217memory_section_action(unsigned long phys_index, unsigned long action)
3947be19
DH
218{
219 int i;
3947be19
DH
220 unsigned long start_pfn, start_paddr;
221 struct page *first_page;
222 int ret;
3947be19 223
0c2c99b1 224 first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
3947be19
DH
225
226 /*
227 * The probe routines leave the pages reserved, just
228 * as the bootmem code does. Make sure they're still
229 * that way.
230 */
231 if (action == MEM_ONLINE) {
232 for (i = 0; i < PAGES_PER_SECTION; i++) {
233 if (PageReserved(first_page+i))
234 continue;
235
236 printk(KERN_WARNING "section number %ld page number %d "
0c2c99b1
NF
237 "not reserved, was it already online?\n",
238 phys_index, i);
3947be19
DH
239 return -EBUSY;
240 }
241 }
242
243 switch (action) {
244 case MEM_ONLINE:
245 start_pfn = page_to_pfn(first_page);
246 ret = online_pages(start_pfn, PAGES_PER_SECTION);
247 break;
248 case MEM_OFFLINE:
3947be19
DH
249 start_paddr = page_to_pfn(first_page) << PAGE_SHIFT;
250 ret = remove_memory(start_paddr,
251 PAGES_PER_SECTION << PAGE_SHIFT);
3947be19
DH
252 break;
253 default:
0c2c99b1
NF
254 WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
255 "%ld\n", __func__, phys_index, action, action);
3947be19
DH
256 ret = -EINVAL;
257 }
3947be19
DH
258
259 return ret;
260}
261
262static int memory_block_change_state(struct memory_block *mem,
263 unsigned long to_state, unsigned long from_state_req)
264{
0c2c99b1
NF
265 int i, ret = 0;
266
da19cbcf 267 mutex_lock(&mem->state_mutex);
3947be19
DH
268
269 if (mem->state != from_state_req) {
270 ret = -EINVAL;
271 goto out;
272 }
273
0c2c99b1
NF
274 if (to_state == MEM_OFFLINE)
275 mem->state = MEM_GOING_OFFLINE;
276
277 for (i = 0; i < sections_per_block; i++) {
278 ret = memory_section_action(mem->phys_index + i, to_state);
279 if (ret)
280 break;
281 }
282
283 if (ret) {
284 for (i = 0; i < sections_per_block; i++)
285 memory_section_action(mem->phys_index + i,
286 from_state_req);
287
288 mem->state = from_state_req;
289 } else
3947be19
DH
290 mem->state = to_state;
291
292out:
da19cbcf 293 mutex_unlock(&mem->state_mutex);
3947be19
DH
294 return ret;
295}
296
297static ssize_t
4a0b2b4d
AK
298store_mem_state(struct sys_device *dev,
299 struct sysdev_attribute *attr, const char *buf, size_t count)
3947be19
DH
300{
301 struct memory_block *mem;
3947be19
DH
302 int ret = -EINVAL;
303
304 mem = container_of(dev, struct memory_block, sysdev);
3947be19
DH
305
306 if (!strncmp(buf, "online", min((int)count, 6)))
307 ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
308 else if(!strncmp(buf, "offline", min((int)count, 7)))
309 ret = memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
0c2c99b1 310
3947be19
DH
311 if (ret)
312 return ret;
313 return count;
314}
315
316/*
317 * phys_device is a bad name for this. What I really want
318 * is a way to differentiate between memory ranges that
319 * are part of physical devices that constitute
320 * a complete removable unit or fru.
321 * i.e. do these ranges belong to the same physical device,
322 * s.t. if I offline all of these sections I can then
323 * remove the physical device?
324 */
4a0b2b4d
AK
325static ssize_t show_phys_device(struct sys_device *dev,
326 struct sysdev_attribute *attr, char *buf)
3947be19
DH
327{
328 struct memory_block *mem =
329 container_of(dev, struct memory_block, sysdev);
330 return sprintf(buf, "%d\n", mem->phys_device);
331}
332
333static SYSDEV_ATTR(phys_index, 0444, show_mem_phys_index, NULL);
334static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state);
335static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL);
5c755e9f 336static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
3947be19
DH
337
338#define mem_create_simple_file(mem, attr_name) \
339 sysdev_create_file(&mem->sysdev, &attr_##attr_name)
340#define mem_remove_simple_file(mem, attr_name) \
341 sysdev_remove_file(&mem->sysdev, &attr_##attr_name)
342
343/*
344 * Block size attribute stuff
345 */
346static ssize_t
8564a6c1
AK
347print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr,
348 char *buf)
3947be19 349{
0c2c99b1 350 return sprintf(buf, "%lx\n", get_memory_block_size());
3947be19
DH
351}
352
8564a6c1 353static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
3947be19
DH
354
355static int block_size_init(void)
356{
28ec24e2 357 return sysfs_create_file(&memory_sysdev_class.kset.kobj,
8564a6c1 358 &attr_block_size_bytes.attr);
3947be19
DH
359}
360
/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
memory_probe_store(struct class *class, struct class_attribute *attr,
		   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid;
	int ret;

	phys_addr = simple_strtoull(buf, NULL, 0);

	nid = memory_add_physaddr_to_nid(phys_addr);
	/*
	 * NOTE(review): only one section's worth of memory is added per
	 * probe even when sections_per_block > 1 — confirm this is the
	 * intended granularity for this interface.
	 */
	ret = add_memory(nid, phys_addr, PAGES_PER_SECTION << PAGE_SHIFT);

	if (ret)
		count = ret;

	return count;
}
static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);

static int memory_probe_init(void)
{
	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
				 &class_attr_probe.attr);
}
#else
static inline int memory_probe_init(void)
{
	return 0;
}
#endif
399
facb6011
AK
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t
store_soft_offline_page(struct class *class,
			struct class_attribute *attr,
			const char *buf, size_t count)
{
	u64 pfn;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (strict_strtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	/* Input is a physical address; convert to a page frame number. */
	pfn >>= PAGE_SHIFT;
	if (!pfn_valid(pfn))
		return -ENXIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t
store_hard_offline_page(struct class *class,
			struct class_attribute *attr,
			const char *buf, size_t count)
{
	u64 pfn;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (strict_strtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	/* No pfn_valid() here: __memory_failure() validates the pfn itself. */
	pfn >>= PAGE_SHIFT;
	ret = __memory_failure(pfn, 0, 0);
	return ret ? ret : count;
}

static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);

static __init int memory_fail_init(void)
{
	int err;

	err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
				&class_attr_soft_offline_page.attr);
	if (!err)
		err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
					&class_attr_hard_offline_page.attr);
	return err;
}
#else
static inline int memory_fail_init(void)
{
	return 0;
}
#endif
461
3947be19
DH
462/*
463 * Note that phys_device is optional. It is here to allow for
464 * differentiation between which *physical* devices each
465 * section belongs to...
466 */
bc32df00
HC
467int __weak arch_get_memory_phys_device(unsigned long start_pfn)
468{
469 return 0;
470}
3947be19 471
98383031
RH
472struct memory_block *find_memory_block_hinted(struct mem_section *section,
473 struct memory_block *hint)
3947be19
DH
474{
475 struct kobject *kobj;
476 struct sys_device *sysdev;
477 struct memory_block *mem;
478 char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1];
0c2c99b1 479 int block_id = base_memory_block_id(__section_nr(section));
3947be19 480
98383031
RH
481 kobj = hint ? &hint->sysdev.kobj : NULL;
482
3947be19
DH
483 /*
484 * This only works because we know that section == sysdev->id
485 * slightly redundant with sysdev_register()
486 */
0c2c99b1 487 sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, block_id);
3947be19 488
98383031 489 kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj);
3947be19
DH
490 if (!kobj)
491 return NULL;
492
493 sysdev = container_of(kobj, struct sys_device, kobj);
494 mem = container_of(sysdev, struct memory_block, sysdev);
495
496 return mem;
497}
498
98383031
RH
499/*
500 * For now, we have a linear search to go find the appropriate
501 * memory_block corresponding to a particular phys_index. If
502 * this gets to be a real problem, we can always use a radix
503 * tree or something here.
504 *
505 * This could be made generic for all sysdev classes.
506 */
507struct memory_block *find_memory_block(struct mem_section *section)
508{
509 return find_memory_block_hinted(section, NULL);
510}
511
0c2c99b1
NF
512static int init_memory_block(struct memory_block **memory,
513 struct mem_section *section, unsigned long state)
e4619c85 514{
0c2c99b1 515 struct memory_block *mem;
e4619c85 516 unsigned long start_pfn;
0c2c99b1 517 int scn_nr;
e4619c85
NF
518 int ret = 0;
519
0c2c99b1 520 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
e4619c85
NF
521 if (!mem)
522 return -ENOMEM;
523
0c2c99b1
NF
524 scn_nr = __section_nr(section);
525 mem->phys_index = base_memory_block_id(scn_nr) * sections_per_block;
e4619c85 526 mem->state = state;
07681215 527 mem->section_count++;
e4619c85
NF
528 mutex_init(&mem->state_mutex);
529 start_pfn = section_nr_to_pfn(mem->phys_index);
530 mem->phys_device = arch_get_memory_phys_device(start_pfn);
531
0c2c99b1 532 ret = register_memory(mem);
e4619c85
NF
533 if (!ret)
534 ret = mem_create_simple_file(mem, phys_index);
535 if (!ret)
536 ret = mem_create_simple_file(mem, state);
537 if (!ret)
538 ret = mem_create_simple_file(mem, phys_device);
539 if (!ret)
540 ret = mem_create_simple_file(mem, removable);
0c2c99b1
NF
541
542 *memory = mem;
543 return ret;
544}
545
546static int add_memory_section(int nid, struct mem_section *section,
547 unsigned long state, enum mem_add_context context)
548{
549 struct memory_block *mem;
550 int ret = 0;
551
552 mutex_lock(&mem_sysfs_mutex);
553
554 mem = find_memory_block(section);
555 if (mem) {
556 mem->section_count++;
557 kobject_put(&mem->sysdev.kobj);
558 } else
559 ret = init_memory_block(&mem, section, state);
560
e4619c85 561 if (!ret) {
0c2c99b1
NF
562 if (context == HOTPLUG &&
563 mem->section_count == sections_per_block)
e4619c85
NF
564 ret = register_mem_sect_under_node(mem, nid);
565 }
566
2938ffbd 567 mutex_unlock(&mem_sysfs_mutex);
e4619c85
NF
568 return ret;
569}
570
3947be19
DH
571int remove_memory_block(unsigned long node_id, struct mem_section *section,
572 int phys_device)
573{
574 struct memory_block *mem;
575
2938ffbd 576 mutex_lock(&mem_sysfs_mutex);
3947be19 577 mem = find_memory_block(section);
07681215
NF
578
579 mem->section_count--;
580 if (mem->section_count == 0) {
581 unregister_mem_sect_under_nodes(mem);
582 mem_remove_simple_file(mem, phys_index);
583 mem_remove_simple_file(mem, state);
584 mem_remove_simple_file(mem, phys_device);
585 mem_remove_simple_file(mem, removable);
0c2c99b1
NF
586 unregister_memory(mem);
587 kfree(mem);
588 } else
589 kobject_put(&mem->sysdev.kobj);
3947be19 590
2938ffbd 591 mutex_unlock(&mem_sysfs_mutex);
3947be19
DH
592 return 0;
593}
594
595/*
596 * need an interface for the VM to add new memory regions,
597 * but without onlining it.
598 */
c04fc586 599int register_new_memory(int nid, struct mem_section *section)
3947be19 600{
0c2c99b1 601 return add_memory_section(nid, section, MEM_OFFLINE, HOTPLUG);
3947be19
DH
602}
603
604int unregister_memory_section(struct mem_section *section)
605{
540557b9 606 if (!present_section(section))
3947be19
DH
607 return -EINVAL;
608
609 return remove_memory_block(0, section, 0);
610}
611
612/*
613 * Initialize the sysfs support for memory devices...
614 */
615int __init memory_dev_init(void)
616{
617 unsigned int i;
618 int ret;
28ec24e2 619 int err;
0c2c99b1 620 unsigned long block_sz;
3947be19 621
312c004d 622 memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops;
3947be19 623 ret = sysdev_class_register(&memory_sysdev_class);
28ec24e2
AM
624 if (ret)
625 goto out;
3947be19 626
0c2c99b1
NF
627 block_sz = get_memory_block_size();
628 sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
629
3947be19
DH
630 /*
631 * Create entries for memory sections that were found
632 * during boot and have been initialized
633 */
634 for (i = 0; i < NR_MEM_SECTIONS; i++) {
540557b9 635 if (!present_section_nr(i))
3947be19 636 continue;
0c2c99b1
NF
637 err = add_memory_section(0, __nr_to_section(i), MEM_ONLINE,
638 BOOT);
28ec24e2
AM
639 if (!ret)
640 ret = err;
3947be19
DH
641 }
642
28ec24e2 643 err = memory_probe_init();
facb6011
AK
644 if (!ret)
645 ret = err;
646 err = memory_fail_init();
28ec24e2
AM
647 if (!ret)
648 ret = err;
649 err = block_size_init();
650 if (!ret)
651 ret = err;
652out:
653 if (ret)
2b3a302a 654 printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
3947be19
DH
655 return ret;
656}