// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Collaborative memory management interface.
 *
 * Copyright (C) 2008 IBM Corporation
 * Author(s): Brian King (brking@linux.vnet.ibm.com)
 */

#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/oom.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/stringify.h>
#include <linux/swap.h>
#include <linux/device.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"

#define CMM_DRIVER_VERSION "1.0.0"
#define CMM_DEFAULT_DELAY 1
#define CMM_HOTPLUG_DELAY 5
#define CMM_DEBUG 0
#define CMM_DISABLE 0
#define CMM_OOM_KB 1024
#define CMM_MIN_MEM_MB 256
#define KB2PAGES(_p) ((_p)>>(PAGE_SHIFT-10))
#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
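/*
 * Illustration (figures depend on the configured page size): with 64K
 * pages (PAGE_SHIFT == 16) the shift is 6, so KB2PAGES(1024) == 16 and
 * PAGES2KB(16) == 1024; with 4K pages (PAGE_SHIFT == 12) the shift is 2
 * and KB2PAGES(1024) == 256.  The macros only convert between kilobytes
 * and kernel pages and assume PAGE_SHIFT >= 10.
 */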
/*
 * The priority level tries to ensure that this notifier is called as
 * late as possible to reduce thrashing in the shared memory pool.
 */
#define CMM_MEM_HOTPLUG_PRI 1
#define CMM_MEM_ISOLATE_PRI 15

static unsigned int delay = CMM_DEFAULT_DELAY;
static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY;
static unsigned int oom_kb = CMM_OOM_KB;
static unsigned int cmm_debug = CMM_DEBUG;
static unsigned int cmm_disabled = CMM_DISABLE;
static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
static struct device cmm_dev;

MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
MODULE_LICENSE("GPL");
MODULE_VERSION(CMM_DRIVER_VERSION);

module_param_named(delay, delay, uint, 0644);
MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. "
		 "[Default=" __stringify(CMM_DEFAULT_DELAY) "]");
module_param_named(hotplug_delay, hotplug_delay, uint, 0644);
MODULE_PARM_DESC(hotplug_delay, "Delay (in seconds) after memory hotplug remove "
		 "before loaning resumes. "
		 "[Default=" __stringify(CMM_HOTPLUG_DELAY) "]");
module_param_named(oom_kb, oom_kb, uint, 0644);
MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. "
		 "[Default=" __stringify(CMM_OOM_KB) "]");
module_param_named(min_mem_mb, min_mem_mb, ulong, 0644);
MODULE_PARM_DESC(min_mem_mb, "Minimum amount of memory (in MB) to not balloon. "
		 "[Default=" __stringify(CMM_MIN_MEM_MB) "]");
module_param_named(debug, cmm_debug, uint, 0644);
MODULE_PARM_DESC(debug, "Enable module debugging logging. Set to 1 to enable. "
		 "[Default=" __stringify(CMM_DEBUG) "]");
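/*
 * Usage sketch (illustrative values; assumes the driver is built as the
 * "cmm" module):
 *
 *	modprobe cmm delay=5 oom_kb=2048 min_mem_mb=512 debug=1
 *
 * All of the parameters above are registered with mode 0644, so they can
 * also be changed at run time under /sys/module/cmm/parameters/.
 */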

#define CMM_NR_PAGES ((PAGE_SIZE - sizeof(void *) - sizeof(unsigned long)) / sizeof(unsigned long))

#define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }

struct cmm_page_array {
	struct cmm_page_array *next;
	unsigned long index;
	unsigned long page[CMM_NR_PAGES];
};
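/*
 * Sizing example (assumes 8-byte longs and pointers): each tracking page
 * holds one next pointer, one index and CMM_NR_PAGES loaned-page
 * addresses, so a 4K page stores (4096 - 8 - 8) / 8 = 510 entries and a
 * 64K page stores 8190.  Tracking pages are chained through ->next with
 * cmm_page_list as the head, protected by cmm_lock.
 */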

static unsigned long loaned_pages;
static unsigned long loaned_pages_target;
static unsigned long oom_freed_pages;

static struct cmm_page_array *cmm_page_list;
static DEFINE_SPINLOCK(cmm_lock);

static DEFINE_MUTEX(hotplug_mutex);
static int hotplug_occurred;	/* protected by the hotplug mutex */

static struct task_struct *cmm_thread_ptr;

static long plpar_page_set_loaned(unsigned long vpa)
{
	unsigned long cmo_page_sz = cmo_get_page_size();
	long rc = 0;
	int i;

	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);

	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
				   vpa + i - cmo_page_sz, 0);

	return rc;
}

static long plpar_page_set_active(unsigned long vpa)
{
	unsigned long cmo_page_sz = cmo_get_page_size();
	long rc = 0;
	int i;

	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);

	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
				   vpa + i - cmo_page_sz, 0);

	return rc;
}
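/*
 * Note on the two helpers above: the CMO page size reported by firmware
 * (cmo_get_page_size()) can be smaller than the kernel's PAGE_SIZE, so
 * each kernel page is marked loaned or active one CMO-sized chunk at a
 * time.  If an H_PAGE_INIT call fails part way through, the second loop
 * walks back over the chunks already converted and restores their
 * previous state, so no page is left half converted.
 */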

/**
 * cmm_alloc_pages - Allocate pages and mark them as loaned
 * @nr: number of pages to allocate
 *
 * Return value:
 *	number of requested pages that could not be allocated
 **/
static long cmm_alloc_pages(long nr)
{
	struct cmm_page_array *pa, *npa;
	unsigned long addr;
	long rc;

	cmm_dbg("Begin request for %ld pages\n", nr);

	while (nr) {
		/* Exit if a hotplug operation is in progress or occurred */
		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				mutex_unlock(&hotplug_mutex);
				break;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			break;
		}

		addr = __get_free_page(GFP_NOIO | __GFP_NOWARN |
				       __GFP_NORETRY | __GFP_NOMEMALLOC);
		if (!addr)
			break;
		spin_lock(&cmm_lock);
		pa = cmm_page_list;
		if (!pa || pa->index >= CMM_NR_PAGES) {
			/* Need a new page for the page list. */
			spin_unlock(&cmm_lock);
			npa = (struct cmm_page_array *)__get_free_page(
					GFP_NOIO | __GFP_NOWARN |
					__GFP_NORETRY | __GFP_NOMEMALLOC);
			if (!npa) {
				pr_info("%s: Can not allocate new page list\n", __func__);
				free_page(addr);
				break;
			}
			spin_lock(&cmm_lock);
			pa = cmm_page_list;

			if (!pa || pa->index >= CMM_NR_PAGES) {
				npa->next = pa;
				npa->index = 0;
				pa = npa;
				cmm_page_list = pa;
			} else
				free_page((unsigned long) npa);
		}

		if ((rc = plpar_page_set_loaned(__pa(addr)))) {
			pr_err("%s: Can not set page to loaned. rc=%ld\n", __func__, rc);
			spin_unlock(&cmm_lock);
			free_page(addr);
			break;
		}

		pa->page[pa->index++] = addr;
		loaned_pages++;
		totalram_pages_dec();
		spin_unlock(&cmm_lock);
		nr--;
	}

	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}

/**
 * cmm_free_pages - Free pages and mark them as active
 * @nr: number of pages to free
 *
 * Return value:
 *	number of requested pages that could not be freed
 **/
static long cmm_free_pages(long nr)
{
	struct cmm_page_array *pa;
	unsigned long addr;

	cmm_dbg("Begin free of %ld pages.\n", nr);
	spin_lock(&cmm_lock);
	pa = cmm_page_list;
	while (nr) {
		if (!pa || pa->index <= 0)
			break;
		addr = pa->page[--pa->index];

		if (pa->index == 0) {
			pa = pa->next;
			free_page((unsigned long) cmm_page_list);
			cmm_page_list = pa;
		}

		plpar_page_set_active(__pa(addr));
		free_page(addr);
		loaned_pages--;
		nr--;
		totalram_pages_inc();
	}
	spin_unlock(&cmm_lock);
	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}

/**
 * cmm_oom_notify - OOM notifier
 * @self: notifier block struct
 * @dummy: not used
 * @parm: returned - number of pages freed
 *
 * Return value:
 *	NOTIFY_OK
 **/
static int cmm_oom_notify(struct notifier_block *self,
			  unsigned long dummy, void *parm)
{
	unsigned long *freed = parm;
	long nr = KB2PAGES(oom_kb);

	cmm_dbg("OOM processing started\n");
	nr = cmm_free_pages(nr);
	loaned_pages_target = loaned_pages;
	*freed += KB2PAGES(oom_kb) - nr;
	oom_freed_pages += KB2PAGES(oom_kb) - nr;
	cmm_dbg("OOM processing complete\n");
	return NOTIFY_OK;
}

/**
 * cmm_get_mpp - Read memory performance parameters
 *
 * Makes an hcall to query the current page loan request from the hypervisor.
 *
 * Return value:
 *	nothing
 **/
static void cmm_get_mpp(void)
{
	int rc;
	struct hvcall_mpp_data mpp_data;
	signed long active_pages_target, page_loan_request, target;
	signed long total_pages = totalram_pages() + loaned_pages;
	signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;

	rc = h_get_mpp(&mpp_data);

	if (rc != H_SUCCESS)
		return;

	page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
	target = page_loan_request + (signed long)loaned_pages;

	if (target < 0 || total_pages < min_mem_pages)
		target = 0;

	if (target > oom_freed_pages)
		target -= oom_freed_pages;
	else
		target = 0;

	active_pages_target = total_pages - target;

	if (min_mem_pages > active_pages_target)
		target = total_pages - min_mem_pages;

	if (target < 0)
		target = 0;

	loaned_pages_target = target;

	cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
		page_loan_request, loaned_pages, loaned_pages_target,
		oom_freed_pages, totalram_pages());
}
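/*
 * Worked example (illustrative numbers, 4K pages): a loan_request of
 * 64MB from the hypervisor gives page_loan_request = 16384 pages.  With
 * 1000 pages already loaned the raw target is 17384; it is then reduced
 * by pages already handed back for OOM, limited so that at least
 * min_mem_pages stay with the partition, and clamped at zero.  The
 * result, loaned_pages_target, is what cmm_thread() works toward.
 */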

static struct notifier_block cmm_oom_nb = {
	.notifier_call = cmm_oom_notify
};

/**
 * cmm_thread - CMM task thread
 * @dummy: not used
 *
 * Return value:
 *	0
 **/
static int cmm_thread(void *dummy)
{
	unsigned long timeleft;

	while (1) {
		timeleft = msleep_interruptible(delay * 1000);

		if (kthread_should_stop() || timeleft)
			break;

		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				hotplug_occurred = 0;
				mutex_unlock(&hotplug_mutex);
				cmm_dbg("Hotplug operation has occurred, "
					"loaning activity suspended "
					"for %d seconds.\n",
					hotplug_delay);
				timeleft = msleep_interruptible(hotplug_delay *
								1000);
				if (kthread_should_stop() || timeleft)
					break;
				continue;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			cmm_dbg("Hotplug operation in progress, activity "
				"suspended\n");
			continue;
		}

		cmm_get_mpp();

		if (loaned_pages_target > loaned_pages) {
			if (cmm_alloc_pages(loaned_pages_target - loaned_pages))
				loaned_pages_target = loaned_pages;
		} else if (loaned_pages_target < loaned_pages)
			cmm_free_pages(loaned_pages - loaned_pages_target);
	}
	return 0;
}

#define CMM_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));

static ssize_t show_oom_pages(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
}

static ssize_t store_oom_pages(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	unsigned long val = simple_strtoul(buf, NULL, 10);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (val != 0)
		return -EBADMSG;

	oom_freed_pages = 0;
	return count;
}

static DEVICE_ATTR(oom_freed_kb, 0644,
		   show_oom_pages, store_oom_pages);

static struct device_attribute *cmm_attrs[] = {
	&dev_attr_loaned_kb,
	&dev_attr_loaned_target_kb,
	&dev_attr_oom_freed_kb,
};

static struct bus_type cmm_subsys = {
	.name = "cmm",
	.dev_name = "cmm",
};
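/*
 * With the usual driver core layout, the registration below typically
 * shows up as /sys/devices/system/cmm/cmm0/{loaned_kb,loaned_target_kb,
 * oom_freed_kb} (exact paths may vary by kernel version).  loaned_kb and
 * loaned_target_kb are read-only; writing 0 to oom_freed_kb as a
 * CAP_SYS_ADMIN user resets the OOM-freed counter.
 */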

/**
 * cmm_sysfs_register - Register with sysfs
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int cmm_sysfs_register(struct device *dev)
{
	int i, rc;

	if ((rc = subsys_system_register(&cmm_subsys, NULL)))
		return rc;

	dev->id = 0;
	dev->bus = &cmm_subsys;

	if ((rc = device_register(dev)))
		goto subsys_unregister;

	for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
		if ((rc = device_create_file(dev, cmm_attrs[i])))
			goto fail;
	}

	return 0;

fail:
	while (--i >= 0)
		device_remove_file(dev, cmm_attrs[i]);
	device_unregister(dev);
subsys_unregister:
	bus_unregister(&cmm_subsys);
	return rc;
}

/**
 * cmm_unregister_sysfs - Unregister from sysfs
 *
 **/
static void cmm_unregister_sysfs(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
		device_remove_file(dev, cmm_attrs[i]);
	device_unregister(dev);
	bus_unregister(&cmm_subsys);
}

/**
 * cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
 *
 **/
static int cmm_reboot_notifier(struct notifier_block *nb,
			       unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		if (cmm_thread_ptr)
			kthread_stop(cmm_thread_ptr);
		cmm_thread_ptr = NULL;
		cmm_free_pages(loaned_pages);
	}
	return NOTIFY_DONE;
}

static struct notifier_block cmm_reboot_nb = {
	.notifier_call = cmm_reboot_notifier,
};

/**
 * cmm_count_pages - Count the number of pages loaned in a particular range.
 *
 * @arg: memory_isolate_notify structure with address range and count
 *
 * Return value:
 *	0 on success
 **/
static unsigned long cmm_count_pages(void *arg)
{
	struct memory_isolate_notify *marg = arg;
	struct cmm_page_array *pa;
	unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn);
	unsigned long end = start + (marg->nr_pages << PAGE_SHIFT);
	unsigned long idx;

	spin_lock(&cmm_lock);
	pa = cmm_page_list;
	while (pa) {
		if ((unsigned long)pa >= start && (unsigned long)pa < end)
			marg->pages_found++;
		for (idx = 0; idx < pa->index; idx++)
			if (pa->page[idx] >= start && pa->page[idx] < end)
				marg->pages_found++;
		pa = pa->next;
	}
	spin_unlock(&cmm_lock);
	return 0;
}

/**
 * cmm_memory_isolate_cb - Handle memory isolation notifier calls
 * @self: notifier block struct
 * @action: action to take
 * @arg: struct memory_isolate_notify data for handler
 *
 * Return value:
 *	NOTIFY_OK or notifier error based on subfunction return value
 **/
static int cmm_memory_isolate_cb(struct notifier_block *self,
				 unsigned long action, void *arg)
{
	int ret = 0;

	if (action == MEM_ISOLATE_COUNT)
		ret = cmm_count_pages(arg);

	return notifier_from_errno(ret);
}

static struct notifier_block cmm_mem_isolate_nb = {
	.notifier_call = cmm_memory_isolate_cb,
	.priority = CMM_MEM_ISOLATE_PRI
};

/**
 * cmm_mem_going_offline - Unloan pages where memory is to be removed
 * @arg: memory_notify structure with page range to be offlined
 *
 * Return value:
 *	0 on success
 **/
static int cmm_mem_going_offline(void *arg)
{
	struct memory_notify *marg = arg;
	unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn);
	unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT);
	struct cmm_page_array *pa_curr, *pa_last, *npa;
	unsigned long idx;
	unsigned long freed = 0;

	cmm_dbg("Memory going offline, searching 0x%lx (%ld pages).\n",
		start_page, marg->nr_pages);
	spin_lock(&cmm_lock);

	/* Search the page list for pages in the range to be offlined */
	pa_last = pa_curr = cmm_page_list;
	while (pa_curr) {
		for (idx = (pa_curr->index - 1); (idx + 1) > 0; idx--) {
			if ((pa_curr->page[idx] < start_page) ||
			    (pa_curr->page[idx] >= end_page))
				continue;

			plpar_page_set_active(__pa(pa_curr->page[idx]));
			free_page(pa_curr->page[idx]);
			freed++;
			loaned_pages--;
			totalram_pages_inc();
			pa_curr->page[idx] = pa_last->page[--pa_last->index];
			if (pa_last->index == 0) {
				if (pa_curr == pa_last)
					pa_curr = pa_last->next;
				pa_last = pa_last->next;
				free_page((unsigned long)cmm_page_list);
				cmm_page_list = pa_last;
			}
		}
		pa_curr = pa_curr->next;
	}

	/* Search for page list structures in the range to be offlined */
	pa_last = NULL;
	pa_curr = cmm_page_list;
	while (pa_curr) {
		if (((unsigned long)pa_curr >= start_page) &&
		    ((unsigned long)pa_curr < end_page)) {
			npa = (struct cmm_page_array *)__get_free_page(
					GFP_NOIO | __GFP_NOWARN |
					__GFP_NORETRY | __GFP_NOMEMALLOC);
			if (!npa) {
				spin_unlock(&cmm_lock);
				cmm_dbg("Failed to allocate memory for list "
					"management. Memory hotplug "
					"failed.\n");
				return -ENOMEM;
			}
			memcpy(npa, pa_curr, PAGE_SIZE);
			if (pa_curr == cmm_page_list)
				cmm_page_list = npa;
			if (pa_last)
				pa_last->next = npa;
			free_page((unsigned long) pa_curr);
			freed++;
			pa_curr = npa;
		}

		pa_last = pa_curr;
		pa_curr = pa_curr->next;
	}

	spin_unlock(&cmm_lock);
	cmm_dbg("Released %ld pages in the search range.\n", freed);

	return 0;
}
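/*
 * Note on cmm_mem_going_offline() above: loaned pages that fall in the
 * range being offlined are made active again and handed back to the
 * kernel, and each hole left in a tracking array is filled with an entry
 * taken from the donor page (pa_last) so the arrays stay densely packed.
 * Tracking pages that themselves sit in the range are copied into newly
 * allocated pages and relinked, so the list survives the offline
 * operation.
 */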

/**
 * cmm_memory_cb - Handle memory hotplug notifier calls
 * @self: notifier block struct
 * @action: action to take
 * @arg: struct memory_notify data for handler
 *
 * Return value:
 *	NOTIFY_OK or notifier error based on subfunction return value
 *
 **/
static int cmm_memory_cb(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	int ret = 0;

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&hotplug_mutex);
		hotplug_occurred = 1;
		ret = cmm_mem_going_offline(arg);
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		mutex_unlock(&hotplug_mutex);
		cmm_dbg("Memory offline operation complete.\n");
		break;
	case MEM_GOING_ONLINE:
	case MEM_ONLINE:
	case MEM_CANCEL_ONLINE:
		break;
	}

	return notifier_from_errno(ret);
}

static struct notifier_block cmm_mem_nb = {
	.notifier_call = cmm_memory_cb,
	.priority = CMM_MEM_HOTPLUG_PRI
};

/**
 * cmm_init - Module initialization
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int cmm_init(void)
{
	int rc = -ENOMEM;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return -EOPNOTSUPP;

	if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
		return rc;

	if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
		goto out_oom_notifier;

	if ((rc = cmm_sysfs_register(&cmm_dev)))
		goto out_reboot_notifier;

	if (register_memory_notifier(&cmm_mem_nb) ||
	    register_memory_isolate_notifier(&cmm_mem_isolate_nb))
		goto out_unregister_notifier;

	if (cmm_disabled)
		return rc;

	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
	if (IS_ERR(cmm_thread_ptr)) {
		rc = PTR_ERR(cmm_thread_ptr);
		goto out_unregister_notifier;
	}

	return rc;

out_unregister_notifier:
	unregister_memory_notifier(&cmm_mem_nb);
	unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
	cmm_unregister_sysfs(&cmm_dev);
out_reboot_notifier:
	unregister_reboot_notifier(&cmm_reboot_nb);
out_oom_notifier:
	unregister_oom_notifier(&cmm_oom_nb);
	return rc;
}

/**
 * cmm_exit - Module exit
 *
 * Return value:
 *	nothing
 **/
static void cmm_exit(void)
{
	if (cmm_thread_ptr)
		kthread_stop(cmm_thread_ptr);
	unregister_oom_notifier(&cmm_oom_nb);
	unregister_reboot_notifier(&cmm_reboot_nb);
	unregister_memory_notifier(&cmm_mem_nb);
	unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
	cmm_free_pages(loaned_pages);
	cmm_unregister_sysfs(&cmm_dev);
}

/**
 * cmm_set_disable - Disable/Enable CMM
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int cmm_set_disable(const char *val, const struct kernel_param *kp)
{
	int disable = simple_strtoul(val, NULL, 10);

	if (disable != 0 && disable != 1)
		return -EINVAL;

	if (disable && !cmm_disabled) {
		if (cmm_thread_ptr)
			kthread_stop(cmm_thread_ptr);
		cmm_thread_ptr = NULL;
		cmm_free_pages(loaned_pages);
	} else if (!disable && cmm_disabled) {
		cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
		if (IS_ERR(cmm_thread_ptr))
			return PTR_ERR(cmm_thread_ptr);
	}

	cmm_disabled = disable;
	return 0;
}
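/*
 * Usage sketch (assumes the driver is built as the "cmm" module): the
 * "disable" parameter registered below is writable at run time, e.g.
 *
 *	echo 1 > /sys/module/cmm/parameters/disable
 *
 * stops cmmthread and returns every loaned page, and writing 0 starts
 * the balloon thread again.
 */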

module_param_call(disable, cmm_set_disable, param_get_uint,
		  &cmm_disabled, 0644);
MODULE_PARM_DESC(disable, "Disable CMM. Set to 1 to disable. "
		 "[Default=" __stringify(CMM_DISABLE) "]");

module_init(cmm_init);
module_exit(cmm_exit);