/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
        struct cgroup_subsys_state css;
        /*
         * Per-hstate counters accounting for the huge pages charged
         * to this cgroup.
         */
        struct res_counter hugepage[HUGE_MAX_HSTATE];
};

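/*
 * cft->private for the per-hstate control files packs the hstate index
 * into the upper 16 bits and the resource attribute (RES_LIMIT,
 * RES_USAGE, RES_MAX_USAGE or RES_FAILCNT) into the lower 16 bits.
 */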
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
        return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
        return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
        return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
        return hugetlb_cgroup_from_css(css_parent(&h_cg->css));
}

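/* Return true if any hstate counter in this cgroup still has pages charged. */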
static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
        int idx;

        for (idx = 0; idx < hugetlb_max_hstate; idx++) {
                if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
                        return true;
        }
        return false;
}

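/*
 * Allocate a new hugetlb_cgroup.  Each per-hstate counter is chained to
 * the matching counter in the parent cgroup so that charges propagate
 * up the hierarchy; the root cgroup's counters have no parent.
 */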
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
        struct hugetlb_cgroup *h_cgroup;
        int idx;

        h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
        if (!h_cgroup)
                return ERR_PTR(-ENOMEM);

        if (parent_h_cgroup) {
                for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
                        res_counter_init(&h_cgroup->hugepage[idx],
                                         &parent_h_cgroup->hugepage[idx]);
        } else {
                root_h_cgroup = h_cgroup;
                for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
                        res_counter_init(&h_cgroup->hugepage[idx], NULL);
        }
        return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
        struct hugetlb_cgroup *h_cgroup;

        h_cgroup = hugetlb_cgroup_from_css(css);
        kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot be moved off the
 * active list or uncharged from the cgroup, so there is no need to
 * take a page reference and test whether the page is active here.
 * This function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
                                       struct page *page)
{
        int csize;
        struct res_counter *counter;
        struct res_counter *fail_res;
        struct hugetlb_cgroup *page_hcg;
        struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

        page_hcg = hugetlb_cgroup_from_page(page);
        /*
         * We can have pages on the active list without any cgroup
         * attached, i.e., huge pages consisting of fewer than 3 normal
         * pages.  We can safely ignore those pages.
         */
        if (!page_hcg || page_hcg != h_cg)
                goto out;

        csize = PAGE_SIZE << compound_order(page);
        if (!parent) {
                parent = root_h_cgroup;
                /* root has no limit */
                res_counter_charge_nofail(&parent->hugepage[idx],
                                          csize, &fail_res);
        }
        counter = &h_cg->hugepage[idx];
        res_counter_uncharge_until(counter, counter->parent, csize);

        set_hugetlb_cgroup(page, parent);
out:
        return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
        struct hstate *h;
        struct page *page;

        do {
                for_each_hstate(h) {
                        spin_lock(&hugetlb_lock);
                        list_for_each_entry(page, &h->hugepage_activelist, lru)
                                hugetlb_cgroup_move_parent(hstate_index(h),
                                                           h_cg, page);

                        spin_unlock(&hugetlb_lock);
                }
                cond_resched();
        } while (hugetlb_cgroup_have_usage(h_cg));
}

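/*
 * Charge nr_pages huge pages of hstate idx to the current task's cgroup.
 * On success *ptr is set to the charged cgroup, to be handed to
 * hugetlb_cgroup_commit_charge() once a page has been allocated.
 */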
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                 struct hugetlb_cgroup **ptr)
{
        int ret = 0;
        struct res_counter *fail_res;
        struct hugetlb_cgroup *h_cg = NULL;
        unsigned long csize = nr_pages * PAGE_SIZE;

        if (hugetlb_cgroup_disabled())
                goto done;
        /*
         * We don't charge any cgroup if the compound page has fewer
         * than 3 pages.
         */
        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                goto done;
again:
        rcu_read_lock();
        h_cg = hugetlb_cgroup_from_task(current);
        if (!css_tryget(&h_cg->css)) {
                rcu_read_unlock();
                goto again;
        }
        rcu_read_unlock();

        ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
        css_put(&h_cg->css);
done:
        *ptr = h_cg;
        return ret;
}

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                  struct hugetlb_cgroup *h_cg,
                                  struct page *page)
{
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        set_hugetlb_cgroup(page, h_cg);
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
                                  struct page *page)
{
        struct hugetlb_cgroup *h_cg;
        unsigned long csize = nr_pages * PAGE_SIZE;

        if (hugetlb_cgroup_disabled())
                return;
        VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
        h_cg = hugetlb_cgroup_from_page(page);
        if (unlikely(!h_cg))
                return;
        set_hugetlb_cgroup(page, NULL);
        res_counter_uncharge(&h_cg->hugepage[idx], csize);
}

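/*
 * Undo a charge taken with hugetlb_cgroup_charge_cgroup() that was never
 * committed to a page, e.g. because the page allocation failed.
 */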
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
                                    struct hugetlb_cgroup *h_cg)
{
        unsigned long csize = nr_pages * PAGE_SIZE;

        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                return;

        res_counter_uncharge(&h_cg->hugepage[idx], csize);
}

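/*
 * Read handler shared by all per-hstate control files; cft->private
 * selects both the hstate and the res_counter member to report.
 */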
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
                                   struct cftype *cft)
{
        int idx, name;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

        idx = MEMFILE_IDX(cft->private);
        name = MEMFILE_ATTR(cft->private);

        return res_counter_read_u64(&h_cg->hugepage[idx], name);
}

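/*
 * Write handler for the limit_in_bytes files.  Only RES_LIMIT may be
 * written, and never on the root cgroup.
 */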
static int hugetlb_cgroup_write(struct cgroup_subsys_state *css,
                                struct cftype *cft, char *buffer)
{
        int idx, name, ret;
        unsigned long long val;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

        idx = MEMFILE_IDX(cft->private);
        name = MEMFILE_ATTR(cft->private);

        switch (name) {
        case RES_LIMIT:
                if (hugetlb_cgroup_is_root(h_cg)) {
                        /* Can't set limit on root */
                        ret = -EINVAL;
                        break;
                }
                /* This function does all the necessary parsing; reuse it. */
                ret = res_counter_memparse_write_strategy(buffer, &val);
                if (ret)
                        break;
                ret = res_counter_set_limit(&h_cg->hugepage[idx], val);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}

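/*
 * Trigger handler for the max_usage_in_bytes and failcnt files; writing
 * to them resets the corresponding counter.
 */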
static int hugetlb_cgroup_reset(struct cgroup_subsys_state *css,
                                unsigned int event)
{
        int idx, name, ret = 0;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

        idx = MEMFILE_IDX(event);
        name = MEMFILE_ATTR(event);

        switch (name) {
        case RES_MAX_USAGE:
                res_counter_reset_max(&h_cg->hugepage[idx]);
                break;
        case RES_FAILCNT:
                res_counter_reset_failcnt(&h_cg->hugepage[idx]);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}

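/* Format a huge page size as a human-readable string, e.g. "2MB" or "1GB". */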
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
        if (hsize >= (1UL << 30))
                snprintf(buf, size, "%luGB", hsize >> 30);
        else if (hsize >= (1UL << 20))
                snprintf(buf, size, "%luMB", hsize >> 20);
        else
                snprintf(buf, size, "%luKB", hsize >> 10);
        return buf;
}

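/*
 * Register the control files for one hstate: limit_in_bytes,
 * usage_in_bytes, max_usage_in_bytes and failcnt, plus a zeroed
 * terminator entry.  Each name embeds the human-readable huge page
 * size, e.g. "2MB.limit_in_bytes"; cgroup core prefixes it with the
 * subsystem name when the file is created.
 */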
static void __init __hugetlb_cgroup_file_init(int idx)
{
        char buf[32];
        struct cftype *cft;
        struct hstate *h = &hstates[idx];

        /* format the size */
        mem_fmt(buf, 32, huge_page_size(h));

        /* Add the limit file */
        cft = &h->cgroup_files[0];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
        cft->read_u64 = hugetlb_cgroup_read_u64;
        cft->write_string = hugetlb_cgroup_write;

        /* Add the usage file */
        cft = &h->cgroup_files[1];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the MAX usage file */
        cft = &h->cgroup_files[2];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
        cft->trigger = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the failcnt file */
        cft = &h->cgroup_files[3];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
        cft->trigger = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* NULL terminate the last cft */
        cft = &h->cgroup_files[4];
        memset(cft, 0, sizeof(*cft));

        WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files));
}

void __init hugetlb_cgroup_file_init(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                /*
                 * Add cgroup control files only if the huge page consists
                 * of more than two normal pages.  This is because we use
                 * page[2].lru.next for storing cgroup details.
                 */
                if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
                        __hugetlb_cgroup_file_init(hstate_index(h));
        }
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages.
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
        struct hugetlb_cgroup *h_cg;
        struct hstate *h = page_hstate(oldhpage);

        if (hugetlb_cgroup_disabled())
                return;

        VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
        spin_lock(&hugetlb_lock);
        h_cg = hugetlb_cgroup_from_page(oldhpage);
        set_hugetlb_cgroup(oldhpage, NULL);

        /* move the h_cg details to the new page */
        set_hugetlb_cgroup(newhpage, h_cg);
        list_move(&newhpage->lru, &h->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
}

struct cgroup_subsys hugetlb_cgrp_subsys = {
        .css_alloc	= hugetlb_cgroup_css_alloc,
        .css_offline	= hugetlb_cgroup_css_offline,
        .css_free	= hugetlb_cgroup_css_free,
};