/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

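/*
 * Initialize a counter: both the hard and the soft limit start at
 * RESOURCE_MAX ("unlimited"), and the parent pointer links the counter
 * into the hierarchy so that charges propagate upwards.
 */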
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
	spin_lock_init(&counter->lock);
	counter->limit = RESOURCE_MAX;
	counter->soft_limit = RESOURCE_MAX;
	counter->parent = parent;
}

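/*
 * Charge @val against a single counter.  The caller must hold
 * counter->lock.  Fails with -ENOMEM and bumps failcnt if the charge
 * would push usage above the hard limit; otherwise usage (and, if
 * needed, the max_usage watermark) is updated.
 */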
int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
{
	if (counter->usage + val > counter->limit) {
		counter->failcnt++;
		return -ENOMEM;
	}

	counter->usage += val;
	if (counter->usage > counter->max_usage)
		counter->max_usage = counter->usage;
	return 0;
}

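/*
 * Charge @val against @counter and every ancestor.  On failure the
 * counter that hit its limit is reported through @limit_fail_at and
 * all charges made so far are rolled back.
 */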
int res_counter_charge(struct res_counter *counter, unsigned long val,
			struct res_counter **limit_fail_at)
{
	int ret;
	unsigned long flags;
	struct res_counter *c, *u;

	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		ret = res_counter_charge_locked(c, val);
		spin_unlock(&c->lock);
		if (ret < 0) {
			*limit_fail_at = c;
			goto undo;
		}
	}
	ret = 0;
	goto done;
undo:
	for (u = counter; u != c; u = u->parent) {
		spin_lock(&u->lock);
		res_counter_uncharge_locked(u, val);
		spin_unlock(&u->lock);
	}
done:
	local_irq_restore(flags);
	return ret;
}

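/*
 * Like res_counter_charge(), but never fails: counters that are over
 * their limit are charged anyway, so usage may temporarily exceed the
 * limit.  The first counter found over its limit is still reported
 * through @limit_fail_at and its error code is returned.
 */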
int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
			      struct res_counter **limit_fail_at)
{
	int ret, r;
	unsigned long flags;
	struct res_counter *c;

	r = ret = 0;
	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		r = res_counter_charge_locked(c, val);
		if (r)
			c->usage += val;
		spin_unlock(&c->lock);
		if (r < 0 && ret == 0) {
			*limit_fail_at = c;
			ret = r;
		}
	}
	local_irq_restore(flags);

	return ret;
}

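/*
 * Remove @val from a single counter.  The caller must hold
 * counter->lock.  Uncharging more than the current usage triggers a
 * warning and clamps the counter to zero.
 */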
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
	if (WARN_ON(counter->usage < val))
		val = counter->usage;

	counter->usage -= val;
}

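/*
 * Remove @val from @counter and from every ancestor, taking each
 * counter's lock in turn with interrupts disabled.
 */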
void res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
	unsigned long flags;
	struct res_counter *c;

	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		res_counter_uncharge_locked(c, val);
		spin_unlock(&c->lock);
	}
	local_irq_restore(flags);
}

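/*
 * Map a RES_* member id onto the corresponding field of the counter.
 * An unknown member id is a programming error and hits BUG().
 */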
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
	switch (member) {
	case RES_USAGE:
		return &counter->usage;
	case RES_MAX_USAGE:
		return &counter->max_usage;
	case RES_LIMIT:
		return &counter->limit;
	case RES_FAILCNT:
		return &counter->failcnt;
	case RES_SOFT_LIMIT:
		return &counter->soft_limit;
	}

	BUG();
	return NULL;
}

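/*
 * Format the requested member into a small kernel buffer (either via
 * the caller-supplied @read_strategy or as a plain decimal) and copy
 * the result to user space.
 */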
ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *st_buf))
{
	unsigned long long *val;
	char buf[64], *s;

	s = buf;
	val = res_counter_member(counter, member);
	if (read_strategy)
		s += read_strategy(*val, s);
	else
		s += sprintf(s, "%llu\n", *val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			pos, buf, s - buf);
}

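/*
 * Return the requested member as a u64.  On 32-bit kernels a 64-bit
 * read is not atomic, so the counter lock is taken; on 64-bit kernels
 * the value can be read directly.
 */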
#if BITS_PER_LONG == 32
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif

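/*
 * Parse a user-supplied limit string.  "-1" selects RESOURCE_MAX
 * (unlimited); anything else goes through memparse() (so "k", "m",
 * "g" suffixes work) and is rounded up to a page boundary.
 */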
int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res)
{
	char *end;

	/* return RESOURCE_MAX (unlimited) if "-1" is specified */
	if (*buf == '-') {
		*res = simple_strtoull(buf + 1, &end, 10);
		if (*res != 1 || *end != '\0')
			return -EINVAL;
		*res = RESOURCE_MAX;
		return 0;
	}

	*res = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*res = PAGE_ALIGN(*res);
	return 0;
}

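/*
 * Parse @buf (with @write_strategy if one is given, otherwise as a
 * plain decimal) and store the result into the selected member under
 * the counter lock.
 */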
int res_counter_write(struct res_counter *counter, int member,
		      const char *buf, write_strategy_fn write_strategy)
{
	char *end;
	unsigned long flags;
	unsigned long long tmp, *val;

	if (write_strategy) {
		if (write_strategy(buf, &tmp))
			return -EINVAL;
	} else {
		tmp = simple_strtoull(buf, &end, 10);
		if (*end != '\0')
			return -EINVAL;
	}
	spin_lock_irqsave(&counter->lock, flags);
	val = res_counter_member(counter, member);
	*val = tmp;
	spin_unlock_irqrestore(&counter->lock, flags);
	return 0;
}