/*
 * kernel/res_counter.c — resource counter implementation
 * (mirrored from mirror_ubuntu-bionic-kernel.git via git.proxmox.com)
 */
1 /*
2 * resource cgroups
3 *
4 * Copyright 2007 OpenVZ SWsoft Inc
5 *
6 * Author: Pavel Emelianov <xemul@openvz.org>
7 *
8 */
9
10 #include <linux/types.h>
11 #include <linux/parser.h>
12 #include <linux/fs.h>
13 #include <linux/res_counter.h>
14 #include <linux/uaccess.h>
15 #include <linux/mm.h>
16
17 void res_counter_init(struct res_counter *counter, struct res_counter *parent)
18 {
19 spin_lock_init(&counter->lock);
20 counter->limit = RESOURCE_MAX;
21 counter->soft_limit = RESOURCE_MAX;
22 counter->parent = parent;
23 }
24
25 int res_counter_charge_locked(struct res_counter *counter, unsigned long val,
26 bool force)
27 {
28 int ret = 0;
29
30 if (counter->usage + val > counter->limit) {
31 counter->failcnt++;
32 ret = -ENOMEM;
33 if (!force)
34 return ret;
35 }
36
37 counter->usage += val;
38 if (counter->usage > counter->max_usage)
39 counter->max_usage = counter->usage;
40 return ret;
41 }
42
/*
 * Charge @val to @counter and to every ancestor up to the root.
 *
 * On the first over-limit level the walk stops (unless @force, which
 * charges every level regardless), *@limit_fail_at is set to the
 * counter that rejected the charge, and — in the non-forced case —
 * all charges already applied below that level are rolled back.
 *
 * Returns 0 on success, -ENOMEM if any level was over its limit.
 */
static int __res_counter_charge(struct res_counter *counter, unsigned long val,
		struct res_counter **limit_fail_at, bool force)
{
	int ret, r;
	unsigned long flags;
	struct res_counter *c, *u;

	r = ret = 0;
	*limit_fail_at = NULL;
	/* IRQs off: counter->lock may also be taken from interrupt context */
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		r = res_counter_charge_locked(c, val, force);
		spin_unlock(&c->lock);
		if (r < 0 && !ret) {
			/* remember only the first (lowest) level that failed */
			ret = r;
			*limit_fail_at = c;
			if (!force)
				break;
		}
	}

	if (ret < 0 && !force) {
		/* undo partial charges: everything strictly below c succeeded */
		for (u = counter; u != c; u = u->parent) {
			spin_lock(&u->lock);
			res_counter_uncharge_locked(u, val);
			spin_unlock(&u->lock);
		}
	}
	local_irq_restore(flags);

	return ret;
}
76
/*
 * Hierarchically charge @val to @counter, failing (and rolling back)
 * if any level is at its limit; see __res_counter_charge().
 */
int res_counter_charge(struct res_counter *counter, unsigned long val,
			struct res_counter **limit_fail_at)
{
	return __res_counter_charge(counter, val, limit_fail_at, false);
}
82
/*
 * Like res_counter_charge() but never backs out: every level is charged
 * even when over its limit.  Still returns -ENOMEM (with *limit_fail_at
 * set) so the caller can tell a limit was exceeded.
 */
int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
			      struct res_counter **limit_fail_at)
{
	return __res_counter_charge(counter, val, limit_fail_at, true);
}
88
89 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
90 {
91 if (WARN_ON(counter->usage < val))
92 val = counter->usage;
93
94 counter->usage -= val;
95 }
96
97 void res_counter_uncharge(struct res_counter *counter, unsigned long val)
98 {
99 unsigned long flags;
100 struct res_counter *c;
101
102 local_irq_save(flags);
103 for (c = counter; c != NULL; c = c->parent) {
104 spin_lock(&c->lock);
105 res_counter_uncharge_locked(c, val);
106 spin_unlock(&c->lock);
107 }
108 local_irq_restore(flags);
109 }
110
111
112 static inline unsigned long long *
113 res_counter_member(struct res_counter *counter, int member)
114 {
115 switch (member) {
116 case RES_USAGE:
117 return &counter->usage;
118 case RES_MAX_USAGE:
119 return &counter->max_usage;
120 case RES_LIMIT:
121 return &counter->limit;
122 case RES_FAILCNT:
123 return &counter->failcnt;
124 case RES_SOFT_LIMIT:
125 return &counter->soft_limit;
126 };
127
128 BUG();
129 return NULL;
130 }
131
132 ssize_t res_counter_read(struct res_counter *counter, int member,
133 const char __user *userbuf, size_t nbytes, loff_t *pos,
134 int (*read_strategy)(unsigned long long val, char *st_buf))
135 {
136 unsigned long long *val;
137 char buf[64], *s;
138
139 s = buf;
140 val = res_counter_member(counter, member);
141 if (read_strategy)
142 s += read_strategy(*val, s);
143 else
144 s += sprintf(s, "%llu\n", *val);
145 return simple_read_from_buffer((void __user *)userbuf, nbytes,
146 pos, buf, s - buf);
147 }
148
#if BITS_PER_LONG == 32
/*
 * On 32-bit a 64-bit counter field cannot be loaded in one instruction,
 * so take the counter lock to avoid returning a torn value.
 */
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
/* 64-bit: the load is naturally atomic, no locking needed */
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif
167
168 int res_counter_memparse_write_strategy(const char *buf,
169 unsigned long long *res)
170 {
171 char *end;
172
173 /* return RESOURCE_MAX(unlimited) if "-1" is specified */
174 if (*buf == '-') {
175 *res = simple_strtoull(buf + 1, &end, 10);
176 if (*res != 1 || *end != '\0')
177 return -EINVAL;
178 *res = RESOURCE_MAX;
179 return 0;
180 }
181
182 *res = memparse(buf, &end);
183 if (*end != '\0')
184 return -EINVAL;
185
186 *res = PAGE_ALIGN(*res);
187 return 0;
188 }
189
190 int res_counter_write(struct res_counter *counter, int member,
191 const char *buf, write_strategy_fn write_strategy)
192 {
193 char *end;
194 unsigned long flags;
195 unsigned long long tmp, *val;
196
197 if (write_strategy) {
198 if (write_strategy(buf, &tmp))
199 return -EINVAL;
200 } else {
201 tmp = simple_strtoull(buf, &end, 10);
202 if (*end != '\0')
203 return -EINVAL;
204 }
205 spin_lock_irqsave(&counter->lock, flags);
206 val = res_counter_member(counter, member);
207 *val = tmp;
208 spin_unlock_irqrestore(&counter->lock, flags);
209 return 0;
210 }