/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys;
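
/*
 * Number of times a failed charge retries per-cgroup reclaim before
 * giving up; see mem_cgroup_charge().
 */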
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;
        /*
         * the counter to account for memory usage
         */
        struct res_counter res;
        /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
         * TODO: Consider making these lists per zone
         */
        struct list_head active_list;
        struct list_head inactive_list;
        /*
         * spin_lock to protect the per cgroup LRU
         */
        spinlock_t lru_lock;
        unsigned long control_type;     /* control RSS or RSS+Pagecache */
};

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two-byte
 * aligned (based on comments from Nick Piggin).
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
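
/*
 * Resulting layout of page->page_cgroup (bit 0 is free because of the
 * alignment guarantee above):
 *
 *      [ pointer to struct page_cgroup (bits N..1) | lock (bit 0) ]
 *
 * page_get_page_cgroup() masks the lock bit off before the pointer is
 * used, and page_assign_page_cgroup() preserves it.
 */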
75 | ||
76 | /* | |
77 | * A page_cgroup page is associated with every page descriptor. The | |
78 | * page_cgroup helps us identify information about the cgroup | |
79 | */ | |
80 | struct page_cgroup { | |
81 | struct list_head lru; /* per cgroup LRU list */ | |
82 | struct page *page; | |
83 | struct mem_cgroup *mem_cgroup; | |
84 | atomic_t ref_cnt; /* Helpful when pages move b/w */ | |
85 | /* mapped and cached states */ | |
86 | }; | |
87 | ||
88 | enum { | |
89 | MEM_CGROUP_TYPE_UNSPEC = 0, | |
90 | MEM_CGROUP_TYPE_MAPPED, | |
91 | MEM_CGROUP_TYPE_CACHED, | |
92 | MEM_CGROUP_TYPE_ALL, | |
93 | MEM_CGROUP_TYPE_MAX, | |
94 | }; | |
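
/*
 * These values are written to the memory.control_type file (see
 * mem_control_type_write() below) and select what gets accounted. Only
 * mem_cgroup_cache_charge() consults the setting, and it charges page
 * cache only when the type is MEM_CGROUP_TYPE_ALL, so any other accepted
 * value effectively limits RSS alone. _UNSPEC and _MAX merely bound the
 * range of valid input.
 */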
95 | ||
96 | static struct mem_cgroup init_mem_cgroup; | |
97 | ||
98 | static inline | |
99 | struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) | |
100 | { | |
101 | return container_of(cgroup_subsys_state(cont, | |
102 | mem_cgroup_subsys_id), struct mem_cgroup, | |
103 | css); | |
104 | } | |
105 | ||
106 | static inline | |
107 | struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) | |
108 | { | |
109 | return container_of(task_subsys_state(p, mem_cgroup_subsys_id), | |
110 | struct mem_cgroup, css); | |
111 | } | |
112 | ||
void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
{
        struct mem_cgroup *mem;

        mem = mem_cgroup_from_task(p);
        css_get(&mem->css);
        mm->mem_cgroup = mem;
}

void mm_free_cgroup(struct mm_struct *mm)
{
        css_put(&mm->mem_cgroup->css);
}

static inline int page_cgroup_locked(struct page *page)
{
        return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
                                        &page->page_cgroup);
}

void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
        int locked;

        /*
         * While resetting the page_cgroup we might not hold the
         * page_cgroup lock. free_hot_cold_page() is an example
         * of such a scenario
         */
        if (pc)
                VM_BUG_ON(!page_cgroup_locked(page));
        locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
        page->page_cgroup = ((unsigned long)pc | locked);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
        return (struct page_cgroup *)
                (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void __always_inline lock_page_cgroup(struct page *page)
{
        bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
        VM_BUG_ON(!page_cgroup_locked(page));
}

static void __always_inline unlock_page_cgroup(struct page *page)
{
        bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

/*
 * Tie a new page_cgroup to the struct page under lock_page_cgroup().
 * This can fail if the page has already been tied to another page_cgroup.
 * Returns 0 on success.
 */
static inline int
page_cgroup_assign_new_page_cgroup(struct page *page, struct page_cgroup *pc)
{
        int ret = 0;

        lock_page_cgroup(page);
        if (!page_get_page_cgroup(page))
                page_assign_page_cgroup(page, pc);
        else    /* A page is tied to another pc. */
                ret = 1;
        unlock_page_cgroup(page);
        return ret;
}

183 | ||
184 | /* | |
185 | * Clear page->page_cgroup member under lock_page_cgroup(). | |
186 | * If given "pc" value is different from one page->page_cgroup, | |
187 | * page->cgroup is not cleared. | |
188 | * Returns a value of page->page_cgroup at lock taken. | |
189 | * A can can detect failure of clearing by following | |
190 | * clear_page_cgroup(page, pc) == pc | |
191 | */ | |
192 | ||
193 | static inline struct page_cgroup * | |
194 | clear_page_cgroup(struct page *page, struct page_cgroup *pc) | |
195 | { | |
196 | struct page_cgroup *ret; | |
197 | /* lock and clear */ | |
198 | lock_page_cgroup(page); | |
199 | ret = page_get_page_cgroup(page); | |
200 | if (likely(ret == pc)) | |
201 | page_assign_page_cgroup(page, NULL); | |
202 | unlock_page_cgroup(page); | |
203 | return ret; | |
204 | } | |
205 | ||
206 | ||
static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
        if (active)
                list_move(&pc->lru, &pc->mem_cgroup->active_list);
        else
                list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
        int ret;

        task_lock(task);
        ret = task->mm && mm_cgroup(task->mm) == mem;
        task_unlock(task);
        return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held
 */
void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
        struct mem_cgroup *mem;
        if (!pc)
                return;

        mem = pc->mem_cgroup;

        spin_lock(&mem->lru_lock);
        __mem_cgroup_move_lists(pc, active);
        spin_unlock(&mem->lru_lock);
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
                                        int mode, struct zone *z,
                                        struct mem_cgroup *mem_cont,
                                        int active)
{
        unsigned long nr_taken = 0;
        struct page *page;
        unsigned long scan;
        LIST_HEAD(pc_list);
        struct list_head *src;
        struct page_cgroup *pc, *tmp;

        if (active)
                src = &mem_cont->active_list;
        else
                src = &mem_cont->inactive_list;

        spin_lock(&mem_cont->lru_lock);
        scan = 0;
        list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
                if (scan++ > nr_to_scan)
                        break;
                page = pc->page;
                VM_BUG_ON(!pc);

                if (unlikely(!PageLRU(page))) {
                        scan--;
                        continue;
                }

                if (PageActive(page) && !active) {
                        __mem_cgroup_move_lists(pc, true);
                        scan--;
                        continue;
                }
                if (!PageActive(page) && active) {
                        __mem_cgroup_move_lists(pc, false);
                        scan--;
                        continue;
                }

                /*
                 * Reclaim, per zone
                 * TODO: make the active/inactive lists per zone
                 */
                if (page_zone(page) != z)
                        continue;

                /*
                 * Check if the meta page went away from under us
                 */
                if (!list_empty(&pc->lru))
                        list_move(&pc->lru, &pc_list);
                else
                        continue;

                if (__isolate_lru_page(page, mode) == 0) {
                        list_move(&page->lru, dst);
                        nr_taken++;
                }
        }

        list_splice(&pc_list, src);
        spin_unlock(&mem_cont->lru_lock);

        *scanned = scan;
        return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
                        gfp_t gfp_mask)
{
        struct mem_cgroup *mem;
        struct page_cgroup *pc;
        unsigned long flags;
        unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

        /*
         * Should page_cgroups go to their own slab?
         * One could optimize the performance of the charging routine
         * by saving a bit in the page_flags and using it as a lock
         * to see if the cgroup page already has a page_cgroup associated
         * with it
         */
retry:
        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        /*
         * The page_cgroup exists and the page has already been accounted
         */
        if (pc) {
                if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
                        /* is this page being uncharged? */
                        unlock_page_cgroup(page);
                        cpu_relax();
                        goto retry;
                } else {
                        unlock_page_cgroup(page);
                        goto done;
                }
        }
350 | ||
351 | unlock_page_cgroup(page); | |
352 | ||
353 | pc = kzalloc(sizeof(struct page_cgroup), gfp_mask); | |
354 | if (pc == NULL) | |
355 | goto err; | |
356 | ||
357 | rcu_read_lock(); | |
358 | /* | |
359 | * We always charge the cgroup the mm_struct belongs to | |
360 | * the mm_struct's mem_cgroup changes on task migration if the | |
361 | * thread group leader migrates. It's possible that mm is not | |
362 | * set, if so charge the init_mm (happens for pagecache usage). | |
363 | */ | |
364 | if (!mm) | |
365 | mm = &init_mm; | |
366 | ||
367 | mem = rcu_dereference(mm->mem_cgroup); | |
368 | /* | |
369 | * For every charge from the cgroup, increment reference | |
370 | * count | |
371 | */ | |
372 | css_get(&mem->css); | |
373 | rcu_read_unlock(); | |
374 | ||
        /*
         * If we created the page_cgroup, we should free it on exceeding
         * the cgroup limit.
         */
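        /*
         * Charge one page against the cgroup's counter. On failure, try
         * per-cgroup reclaim and loop: GFP_ATOMIC callers cannot reclaim
         * and bail out immediately, a successful reclaim (or a usage that
         * dropped back under the limit) retries the charge, and after
         * MEM_CGROUP_RECLAIM_RETRIES unproductive passes we trigger the
         * per-cgroup OOM killer and fail with -ENOMEM.
         */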
        while (res_counter_charge(&mem->res, PAGE_SIZE)) {
                bool is_atomic = gfp_mask & GFP_ATOMIC;
                /*
                 * We cannot reclaim under GFP_ATOMIC, so fail the charge
                 */
                if (is_atomic)
                        goto noreclaim;

                if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
                        continue;

                /*
                 * try_to_free_mem_cgroup_pages() might not give us a full
                 * picture of reclaim. Some pages are reclaimed and might be
                 * moved to swap cache or just unmapped from the cgroup.
                 * Check the limit again to see if the reclaim reduced the
                 * current usage of the cgroup before giving up
                 */
                if (res_counter_check_under_limit(&mem->res))
                        continue;
                /*
                 * Since we control both RSS and cache, we end up in an
                 * interesting situation: reclaimed memory (essentially RSS)
                 * gets pushed to the swap cache, and those pages eventually
                 * come back onto our list. Hence we give ourselves a
                 * few chances before we fail
                 */
                else if (nr_retries--) {
                        congestion_wait(WRITE, HZ/10);
                        continue;
                }
noreclaim:
                css_put(&mem->css);
                if (!is_atomic)
                        mem_cgroup_out_of_memory(mem, GFP_KERNEL);
                goto free_pc;
        }
417 | ||
418 | atomic_set(&pc->ref_cnt, 1); | |
419 | pc->mem_cgroup = mem; | |
420 | pc->page = page; | |
421 | if (page_cgroup_assign_new_page_cgroup(page, pc)) { | |
422 | /* | |
423 | * an another charge is added to this page already. | |
424 | * we do take lock_page_cgroup(page) again and read | |
425 | * page->cgroup, increment refcnt.... just retry is OK. | |
426 | */ | |
427 | res_counter_uncharge(&mem->res, PAGE_SIZE); | |
428 | css_put(&mem->css); | |
429 | kfree(pc); | |
430 | goto retry; | |
431 | } | |
432 | ||
433 | spin_lock_irqsave(&mem->lru_lock, flags); | |
434 | list_add(&pc->lru, &mem->active_list); | |
435 | spin_unlock_irqrestore(&mem->lru_lock, flags); | |
436 | ||
437 | done: | |
438 | return 0; | |
439 | free_pc: | |
440 | kfree(pc); | |
441 | err: | |
442 | return -ENOMEM; | |
443 | } | |
444 | ||
445 | /* | |
446 | * See if the cached pages should be charged at all? | |
447 | */ | |
448 | int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, | |
449 | gfp_t gfp_mask) | |
450 | { | |
451 | struct mem_cgroup *mem; | |
452 | if (!mm) | |
453 | mm = &init_mm; | |
454 | ||
455 | mem = rcu_dereference(mm->mem_cgroup); | |
456 | if (mem->control_type == MEM_CGROUP_TYPE_ALL) | |
457 | return mem_cgroup_charge(page, mm, gfp_mask); | |
458 | else | |
459 | return 0; | |
460 | } | |
461 | ||
462 | /* | |
463 | * Uncharging is always a welcome operation, we never complain, simply | |
464 | * uncharge. | |
465 | */ | |
466 | void mem_cgroup_uncharge(struct page_cgroup *pc) | |
467 | { | |
468 | struct mem_cgroup *mem; | |
469 | struct page *page; | |
470 | unsigned long flags; | |
471 | ||
472 | /* | |
473 | * This can handle cases when a page is not charged at all and we | |
474 | * are switching between handling the control_type. | |
475 | */ | |
476 | if (!pc) | |
477 | return; | |
478 | ||
479 | if (atomic_dec_and_test(&pc->ref_cnt)) { | |
480 | page = pc->page; | |
481 | /* | |
482 | * get page->cgroup and clear it under lock. | |
483 | */ | |
484 | if (clear_page_cgroup(page, pc) == pc) { | |
485 | mem = pc->mem_cgroup; | |
486 | css_put(&mem->css); | |
487 | res_counter_uncharge(&mem->res, PAGE_SIZE); | |
488 | spin_lock_irqsave(&mem->lru_lock, flags); | |
489 | list_del_init(&pc->lru); | |
490 | spin_unlock_irqrestore(&mem->lru_lock, flags); | |
491 | kfree(pc); | |
492 | } else { | |
493 | /* | |
494 | * Note:This will be removed when force-empty patch is | |
495 | * applied. just show warning here. | |
496 | */ | |
497 | printk(KERN_ERR "Race in mem_cgroup_uncharge() ?"); | |
498 | dump_stack(); | |
499 | } | |
500 | } | |
501 | } | |

/*
 * Returns non-zero if a page (under migration) has a valid page_cgroup
 * member. The refcnt of the page_cgroup is incremented.
 */

int mem_cgroup_prepare_migration(struct page *page)
{
        struct page_cgroup *pc;
        int ret = 0;
        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        if (pc && atomic_inc_not_zero(&pc->ref_cnt))
                ret = 1;
        unlock_page_cgroup(page);
        return ret;
}

void mem_cgroup_end_migration(struct page *page)
{
        struct page_cgroup *pc = page_get_page_cgroup(page);
        mem_cgroup_uncharge(pc);
}

/*
 * We know both *page* and *newpage* are now not on the LRU and PG_locked.
 * And there is no race with the uncharge() routines because the page_cgroup
 * for *page* has an extra reference taken by mem_cgroup_prepare_migration().
 */

void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
        struct page_cgroup *pc;
retry:
        pc = page_get_page_cgroup(page);
        if (!pc)
                return;
        if (clear_page_cgroup(page, pc) != pc)
                goto retry;
        pc->page = newpage;
        lock_page_cgroup(newpage);
        page_assign_page_cgroup(newpage, pc);
        unlock_page_cgroup(newpage);
        return;
}

int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
        *tmp = memparse(buf, &buf);
        if (*buf != '\0')
                return -EINVAL;

        /*
         * Round up the value to the closest page size
         */
        *tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
        return 0;
}

static ssize_t mem_cgroup_read(struct cgroup *cont,
                struct cftype *cft, struct file *file,
                char __user *userbuf, size_t nbytes, loff_t *ppos)
{
        return res_counter_read(&mem_cgroup_from_cont(cont)->res,
                                cft->private, userbuf, nbytes, ppos,
                                NULL);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                                struct file *file, const char __user *userbuf,
                                size_t nbytes, loff_t *ppos)
{
        return res_counter_write(&mem_cgroup_from_cont(cont)->res,
                                cft->private, userbuf, nbytes, ppos,
                                mem_cgroup_write_strategy);
}

static ssize_t mem_control_type_write(struct cgroup *cont,
                        struct cftype *cft, struct file *file,
                        const char __user *userbuf,
                        size_t nbytes, loff_t *pos)
{
        int ret;
        char *buf, *end;
        unsigned long tmp;
        struct mem_cgroup *mem;

        mem = mem_cgroup_from_cont(cont);
        buf = kmalloc(nbytes + 1, GFP_KERNEL);
        ret = -ENOMEM;
        if (buf == NULL)
                goto out;

        buf[nbytes] = 0;
        ret = -EFAULT;
        if (copy_from_user(buf, userbuf, nbytes))
                goto out_free;

        ret = -EINVAL;
        tmp = simple_strtoul(buf, &end, 10);
        if (*end != '\0')
                goto out_free;

        if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
                goto out_free;

        mem->control_type = tmp;
        ret = nbytes;
out_free:
        kfree(buf);
out:
        return ret;
}

static ssize_t mem_control_type_read(struct cgroup *cont,
                                struct cftype *cft,
                                struct file *file, char __user *userbuf,
                                size_t nbytes, loff_t *ppos)
{
        unsigned long val;
        char buf[64], *s;
        struct mem_cgroup *mem;

        mem = mem_cgroup_from_cont(cont);
        s = buf;
        val = mem->control_type;
        s += sprintf(s, "%lu\n", val);
        return simple_read_from_buffer((void __user *)userbuf, nbytes,
                        ppos, buf, s - buf);
}

static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
                .private = RES_USAGE,
                .read = mem_cgroup_read,
        },
        {
                .name = "limit_in_bytes",
                .private = RES_LIMIT,
                .write = mem_cgroup_write,
                .read = mem_cgroup_read,
        },
        {
                .name = "failcnt",
                .private = RES_FAILCNT,
                .read = mem_cgroup_read,
        },
        {
                .name = "control_type",
                .write = mem_control_type_write,
                .read = mem_control_type_read,
        },
};

static struct mem_cgroup init_mem_cgroup;

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
        struct mem_cgroup *mem;

        if (unlikely((cont->parent) == NULL)) {
                mem = &init_mem_cgroup;
                init_mm.mem_cgroup = mem;
        } else
                mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

        if (mem == NULL)
                return NULL;

        res_counter_init(&mem->res);
        INIT_LIST_HEAD(&mem->active_list);
        INIT_LIST_HEAD(&mem->inactive_list);
        spin_lock_init(&mem->lru_lock);
        mem->control_type = MEM_CGROUP_TYPE_ALL;
        return &mem->css;
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        kfree(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        return cgroup_add_files(cont, ss, mem_cgroup_files,
                                        ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *cont,
                                struct cgroup *old_cont,
                                struct task_struct *p)
{
        struct mm_struct *mm;
        struct mem_cgroup *mem, *old_mem;

        mm = get_task_mm(p);
        if (mm == NULL)
                return;

        mem = mem_cgroup_from_cont(cont);
        old_mem = mem_cgroup_from_cont(old_cont);

        if (mem == old_mem)
                goto out;

        /*
         * Only thread group leaders are allowed to migrate; the mm_struct
         * is in effect owned by the leader
         */
        if (p->tgid != p->pid)
                goto out;

        css_get(&mem->css);
        rcu_assign_pointer(mm->mem_cgroup, mem);
        css_put(&old_mem->css);

out:
        mmput(mm);
        return;
}
725 | ||
726 | struct cgroup_subsys mem_cgroup_subsys = { | |
727 | .name = "memory", | |
728 | .subsys_id = mem_cgroup_subsys_id, | |
729 | .create = mem_cgroup_create, | |
730 | .destroy = mem_cgroup_destroy, | |
731 | .populate = mem_cgroup_populate, | |
732 | .attach = mem_cgroup_move_task, | |
733 | .early_init = 1, | |
734 | }; |