]> git.proxmox.com Git - mirror_qemu.git/blob - block/throttle-groups.c
block: convert ThrottleGroup to object with QOM
[mirror_qemu.git] / block / throttle-groups.c
1 /*
2 * QEMU block throttling group infrastructure
3 *
4 * Copyright (C) Nodalink, EURL. 2014
5 * Copyright (C) Igalia, S.L. 2015
6 *
7 * Authors:
8 * BenoƮt Canet <benoit.canet@nodalink.com>
9 * Alberto Garcia <berto@igalia.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2 or
14 * (at your option) version 3 of the License.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see <http://www.gnu.org/licenses/>.
23 */
24
25 #include "qemu/osdep.h"
26 #include "sysemu/block-backend.h"
27 #include "block/throttle-groups.h"
28 #include "qemu/throttle-options.h"
29 #include "qemu/queue.h"
30 #include "qemu/thread.h"
31 #include "sysemu/qtest.h"
32 #include "qapi/error.h"
33 #include "qapi-visit.h"
34 #include "qom/object.h"
35 #include "qom/object_interfaces.h"
36
37 static void throttle_group_obj_init(Object *obj);
38 static void throttle_group_obj_complete(UserCreatable *obj, Error **errp);
39
40 /* The ThrottleGroup structure (with its ThrottleState) is shared
41 * among different ThrottleGroupMembers and it's independent from
42 * AioContext, so in order to use it from different threads it needs
43 * its own locking.
44 *
45 * This locking is however handled internally in this file, so it's
46 * transparent to outside users.
47 *
48 * The whole ThrottleGroup structure is private and invisible to
49 * outside users, that only use it through its ThrottleState.
50 *
51 * In addition to the ThrottleGroup structure, ThrottleGroupMember has
52 * fields that need to be accessed by other members of the group and
53 * therefore also need to be protected by this lock. Once a
54 * ThrottleGroupMember is registered in a group those fields can be accessed
55 * by other threads any time.
56 *
57 * Again, all this is handled internally and is mostly transparent to
58 * the outside. The 'throttle_timers' field however has an additional
59 * constraint because it may be temporarily invalid (see for example
60 * blk_set_aio_context()). Therefore in this file a thread will
61 * access some other ThrottleGroupMember's timers only after verifying that
62 * that ThrottleGroupMember has throttled requests in the queue.
63 */
typedef struct ThrottleGroup {
    Object parent_obj;

    /* refuse individual property change if initialization is complete */
    bool is_initialized;
    char *name; /* This is constant during the lifetime of the group */

    QemuMutex lock; /* This lock protects the following four fields */
    ThrottleState ts;
    QLIST_HEAD(, ThrottleGroupMember) head;      /* members of this group */
    ThrottleGroupMember *tokens[2];  /* round-robin token: [0] read, [1] write */
    bool any_timer_armed[2];         /* per direction: is any member's timer armed? */
    QEMUClockType clock_type;        /* set once in instance_init */

    /* This field is protected by the global QEMU mutex */
    QTAILQ_ENTRY(ThrottleGroup) list;
} ThrottleGroup;
81
/* List of all existing throttle groups; protected by the global QEMU mutex */
static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
    QTAILQ_HEAD_INITIALIZER(throttle_groups);
85
86
87 /* This function reads throttle_groups and must be called under the global
88 * mutex.
89 */
90 static ThrottleGroup *throttle_group_by_name(const char *name)
91 {
92 ThrottleGroup *iter;
93
94 /* Look for an existing group with that name */
95 QTAILQ_FOREACH(iter, &throttle_groups, list) {
96 if (!g_strcmp0(name, iter->name)) {
97 return iter;
98 }
99 }
100
101 return NULL;
102 }
103
104 /* Increments the reference count of a ThrottleGroup given its name.
105 *
106 * If no ThrottleGroup is found with the given name a new one is
107 * created.
108 *
109 * This function edits throttle_groups and must be called under the global
110 * mutex.
111 *
112 * @name: the name of the ThrottleGroup
113 * @ret: the ThrottleState member of the ThrottleGroup
114 */
115 ThrottleState *throttle_group_incref(const char *name)
116 {
117 ThrottleGroup *tg = NULL;
118
119 /* Look for an existing group with that name */
120 tg = throttle_group_by_name(name);
121
122 if (tg) {
123 object_ref(OBJECT(tg));
124 } else {
125 /* Create a new one if not found */
126 /* new ThrottleGroup obj will have a refcnt = 1 */
127 tg = THROTTLE_GROUP(object_new(TYPE_THROTTLE_GROUP));
128 tg->name = g_strdup(name);
129 throttle_group_obj_complete(USER_CREATABLE(tg), &error_abort);
130 }
131
132 return &tg->ts;
133 }
134
/* Decrease the reference count of a ThrottleGroup.
 *
 * When the reference count reaches zero the ThrottleGroup is
 * destroyed (via the QOM instance_finalize callback).
 *
 * This function edits throttle_groups and must be called under the global
 * mutex.
 *
 * @ts: The ThrottleGroup to unref, given by its ThrottleState member
 */
void throttle_group_unref(ThrottleState *ts)
{
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    object_unref(OBJECT(tg));
}
150
/* Get the name from a ThrottleGroupMember's group. The name (and the pointer)
 * is guaranteed to remain constant during the lifetime of the group.
 *
 * @tgm: a ThrottleGroupMember
 * @ret: the name of the group.
 */
const char *throttle_group_get_name(ThrottleGroupMember *tgm)
{
    /* tgm->throttle_state is embedded in the group, so recover the group */
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    return tg->name;
}
162
163 /* Return the next ThrottleGroupMember in the round-robin sequence, simulating
164 * a circular list.
165 *
166 * This assumes that tg->lock is held.
167 *
168 * @tgm: the current ThrottleGroupMember
169 * @ret: the next ThrottleGroupMember in the sequence
170 */
171 static ThrottleGroupMember *throttle_group_next_tgm(ThrottleGroupMember *tgm)
172 {
173 ThrottleState *ts = tgm->throttle_state;
174 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
175 ThrottleGroupMember *next = QLIST_NEXT(tgm, round_robin);
176
177 if (!next) {
178 next = QLIST_FIRST(&tg->head);
179 }
180
181 return next;
182 }
183
184 /*
185 * Return whether a ThrottleGroupMember has pending requests.
186 *
187 * This assumes that tg->lock is held.
188 *
189 * @tgm: the ThrottleGroupMember
190 * @is_write: the type of operation (read/write)
191 * @ret: whether the ThrottleGroupMember has pending requests.
192 */
193 static inline bool tgm_has_pending_reqs(ThrottleGroupMember *tgm,
194 bool is_write)
195 {
196 return tgm->pending_reqs[is_write];
197 }
198
/* Return the next ThrottleGroupMember in the round-robin sequence with pending
 * I/O requests.
 *
 * This assumes that tg->lock is held.
 *
 * @tgm: the current ThrottleGroupMember
 * @is_write: the type of operation (read/write)
 * @ret: the next ThrottleGroupMember with pending requests, or tgm if
 *       there is none.
 */
static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
                                                bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    ThrottleGroupMember *token, *start;

    /* Begin the search at the group's current token for this direction */
    start = token = tg->tokens[is_write];

    /* get next bs round in round robin style */
    token = throttle_group_next_tgm(token);
    while (token != start && !tgm_has_pending_reqs(token, is_write)) {
        token = throttle_group_next_tgm(token);
    }

    /* If no IO are queued for scheduling on the next round robin token
     * then decide the token is the current tgm because chances are
     * the current tgm got the current request queued.
     */
    if (token == start && !tgm_has_pending_reqs(token, is_write)) {
        token = tgm;
    }

    /* Either we return the original TGM, or one with pending requests */
    assert(token == tgm || tgm_has_pending_reqs(token, is_write));

    return token;
}
237
/* Check if the next I/O request for a ThrottleGroupMember needs to be
 * throttled or not. If there's no timer set in this group, set one and update
 * the token accordingly.
 *
 * This assumes that tg->lock is held.
 *
 * @tgm: the current ThrottleGroupMember
 * @is_write: the type of operation (read/write)
 * @ret: whether the I/O request needs to be throttled or not
 */
static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
                                          bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    ThrottleTimers *tt = &tgm->throttle_timers;
    bool must_wait;

    /* With I/O limits disabled the request never waits */
    if (atomic_read(&tgm->io_limits_disabled)) {
        return false;
    }

    /* Check if any of the timers in this group is already armed */
    if (tg->any_timer_armed[is_write]) {
        return true;
    }

    must_wait = throttle_schedule_timer(ts, tt, is_write);

    /* If a timer just got armed, set tgm as the current token */
    if (must_wait) {
        tg->tokens[is_write] = tgm;
        tg->any_timer_armed[is_write] = true;
    }

    return must_wait;
}
275
276 /* Start the next pending I/O request for a ThrottleGroupMember. Return whether
277 * any request was actually pending.
278 *
279 * @tgm: the current ThrottleGroupMember
280 * @is_write: the type of operation (read/write)
281 */
282 static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tgm,
283 bool is_write)
284 {
285 bool ret;
286
287 qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
288 ret = qemu_co_queue_next(&tgm->throttled_reqs[is_write]);
289 qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
290
291 return ret;
292 }
293
/* Look for the next pending I/O request and schedule it.
 *
 * This assumes that tg->lock is held.
 *
 * @tgm: the current ThrottleGroupMember
 * @is_write: the type of operation (read/write)
 */
static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool must_wait;
    ThrottleGroupMember *token;

    /* Check if there's any pending request to schedule next */
    token = next_throttle_token(tgm, is_write);
    if (!tgm_has_pending_reqs(token, is_write)) {
        return;
    }

    /* Set a timer for the request if it needs to be throttled */
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* If it doesn't have to wait, queue it for immediate execution */
    if (!must_wait) {
        /* Give preference to requests from the current tgm */
        if (qemu_in_coroutine() &&
            throttle_group_co_restart_queue(tgm, is_write)) {
            token = tgm;
        } else {
            /* Fire the token's timer right away so its queue is restarted
             * from the timer callback in the token's own AioContext */
            ThrottleTimers *tt = &token->throttle_timers;
            int64_t now = qemu_clock_get_ns(tg->clock_type);
            timer_mod(tt->timers[is_write], now);
            tg->any_timer_armed[is_write] = true;
        }
        tg->tokens[is_write] = token;
    }
}
332
/* Check if an I/O request needs to be throttled, wait and set a timer
 * if necessary, and schedule the next request using a round robin
 * algorithm.
 *
 * @tgm: the current ThrottleGroupMember
 * @bytes: the number of bytes for this I/O
 * @is_write: the type of operation (read/write)
 */
void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
                                                        unsigned int bytes,
                                                        bool is_write)
{
    bool must_wait;
    ThrottleGroupMember *token;
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);

    /* First we check if this I/O has to be throttled. */
    token = next_throttle_token(tgm, is_write);
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* Wait if there's a timer set or queued requests of this type */
    if (must_wait || tgm->pending_reqs[is_write]) {
        tgm->pending_reqs[is_write]++;
        /* Drop tg->lock before parking: other members must still be able
         * to make progress while this coroutine waits */
        qemu_mutex_unlock(&tg->lock);
        qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
        qemu_co_queue_wait(&tgm->throttled_reqs[is_write],
                           &tgm->throttled_reqs_lock);
        qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
        qemu_mutex_lock(&tg->lock);
        tgm->pending_reqs[is_write]--;
    }

    /* The I/O will be executed, so do the accounting */
    throttle_account(tgm->throttle_state, is_write, bytes);

    /* Schedule the next request */
    schedule_next_request(tgm, is_write);

    qemu_mutex_unlock(&tg->lock);
}
374
/* Arguments passed to throttle_group_restart_queue_entry() */
typedef struct {
    ThrottleGroupMember *tgm; /* member whose queue is being restarted */
    bool is_write;            /* request direction: read (false) / write (true) */
} RestartData;
379
380 static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
381 {
382 RestartData *data = opaque;
383 ThrottleGroupMember *tgm = data->tgm;
384 ThrottleState *ts = tgm->throttle_state;
385 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
386 bool is_write = data->is_write;
387 bool empty_queue;
388
389 empty_queue = !throttle_group_co_restart_queue(tgm, is_write);
390
391 /* If the request queue was empty then we have to take care of
392 * scheduling the next one */
393 if (empty_queue) {
394 qemu_mutex_lock(&tg->lock);
395 schedule_next_request(tgm, is_write);
396 qemu_mutex_unlock(&tg->lock);
397 }
398 }
399
400 static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write)
401 {
402 Coroutine *co;
403 RestartData rd = {
404 .tgm = tgm,
405 .is_write = is_write
406 };
407
408 co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
409 aio_co_enter(tgm->aio_context, co);
410 }
411
412 void throttle_group_restart_tgm(ThrottleGroupMember *tgm)
413 {
414 if (tgm->throttle_state) {
415 throttle_group_restart_queue(tgm, 0);
416 throttle_group_restart_queue(tgm, 1);
417 }
418 }
419
420 /* Update the throttle configuration for a particular group. Similar
421 * to throttle_config(), but guarantees atomicity within the
422 * throttling group.
423 *
424 * @tgm: a ThrottleGroupMember that is a member of the group
425 * @cfg: the configuration to set
426 */
427 void throttle_group_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
428 {
429 ThrottleState *ts = tgm->throttle_state;
430 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
431 qemu_mutex_lock(&tg->lock);
432 throttle_config(ts, tg->clock_type, cfg);
433 qemu_mutex_unlock(&tg->lock);
434
435 throttle_group_restart_tgm(tgm);
436 }
437
438 /* Get the throttle configuration from a particular group. Similar to
439 * throttle_get_config(), but guarantees atomicity within the
440 * throttling group.
441 *
442 * @tgm: a ThrottleGroupMember that is a member of the group
443 * @cfg: the configuration will be written here
444 */
445 void throttle_group_get_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
446 {
447 ThrottleState *ts = tgm->throttle_state;
448 ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
449 qemu_mutex_lock(&tg->lock);
450 throttle_get_config(ts, cfg);
451 qemu_mutex_unlock(&tg->lock);
452 }
453
/* ThrottleTimers callback. This wakes up a request that was waiting
 * because it had been throttled.
 *
 * @tgm: the ThrottleGroupMember whose request had been throttled
 * @is_write: the type of operation (read/write)
 */
static void timer_cb(ThrottleGroupMember *tgm, bool is_write)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    /* The timer has just been fired, so we can update the flag */
    qemu_mutex_lock(&tg->lock);
    tg->any_timer_armed[is_write] = false;
    qemu_mutex_unlock(&tg->lock);

    /* Run the request that was waiting for this timer */
    throttle_group_restart_queue(tgm, is_write);
}
473
/* Timer callback for the read direction; @opaque is the ThrottleGroupMember */
static void read_timer_cb(void *opaque)
{
    timer_cb(opaque, false);
}

/* Timer callback for the write direction; @opaque is the ThrottleGroupMember */
static void write_timer_cb(void *opaque)
{
    timer_cb(opaque, true);
}
483
/* Register a ThrottleGroupMember from the throttling group, also initializing
 * its timers and updating its throttle_state pointer to point to it. If a
 * throttling group with that name does not exist yet, it will be created.
 *
 * This function edits throttle_groups and must be called under the global
 * mutex.
 *
 * @tgm: the ThrottleGroupMember to insert
 * @groupname: the name of the group
 * @ctx: the AioContext to use
 */
void throttle_group_register_tgm(ThrottleGroupMember *tgm,
                                 const char *groupname,
                                 AioContext *ctx)
{
    int i;
    /* Takes a reference on the group (creating it if needed) */
    ThrottleState *ts = throttle_group_incref(groupname);
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    tgm->throttle_state = ts;
    tgm->aio_context = ctx;

    qemu_mutex_lock(&tg->lock);
    /* If the ThrottleGroup is new set this ThrottleGroupMember as the token */
    for (i = 0; i < 2; i++) {
        if (!tg->tokens[i]) {
            tg->tokens[i] = tgm;
        }
    }

    QLIST_INSERT_HEAD(&tg->head, tgm, round_robin);

    /* Create the member's read/write timers in its own AioContext */
    throttle_timers_init(&tgm->throttle_timers,
                         tgm->aio_context,
                         tg->clock_type,
                         read_timer_cb,
                         write_timer_cb,
                         tgm);
    qemu_co_mutex_init(&tgm->throttled_reqs_lock);
    qemu_co_queue_init(&tgm->throttled_reqs[0]);
    qemu_co_queue_init(&tgm->throttled_reqs[1]);

    qemu_mutex_unlock(&tg->lock);
}
528
/* Unregister a ThrottleGroupMember from its group, removing it from the list,
 * destroying the timers and setting the throttle_state pointer to NULL.
 *
 * The ThrottleGroupMember must not have pending throttled requests, so the
 * caller has to drain them first.
 *
 * The group will be destroyed if it's empty after this operation.
 *
 * @tgm the ThrottleGroupMember to remove
 */
void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
{
    ThrottleState *ts = tgm->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    ThrottleGroupMember *token;
    int i;

    /* The caller must have drained this member already */
    assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));

    qemu_mutex_lock(&tg->lock);
    /* Hand the round-robin token over to another member if we hold it */
    for (i = 0; i < 2; i++) {
        if (tg->tokens[i] == tgm) {
            token = throttle_group_next_tgm(tgm);
            /* Take care of the case where this is the last tgm in the group */
            if (token == tgm) {
                token = NULL;
            }
            tg->tokens[i] = token;
        }
    }

    /* remove the current tgm from the list */
    QLIST_REMOVE(tgm, round_robin);
    throttle_timers_destroy(&tgm->throttle_timers);
    qemu_mutex_unlock(&tg->lock);

    /* Drop the reference taken at registration time; this may destroy tg */
    throttle_group_unref(&tg->ts);
    tgm->throttle_state = NULL;
}
570
571 void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
572 AioContext *new_context)
573 {
574 ThrottleTimers *tt = &tgm->throttle_timers;
575 throttle_timers_attach_aio_context(tt, new_context);
576 tgm->aio_context = new_context;
577 }
578
579 void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
580 {
581 ThrottleTimers *tt = &tgm->throttle_timers;
582 throttle_timers_detach_aio_context(tt);
583 tgm->aio_context = NULL;
584 }
585
/* Prefix the QOM property names with "x-" to mark them as experimental */
#undef THROTTLE_OPT_PREFIX
#define THROTTLE_OPT_PREFIX "x-"

/* Helper struct and array for QOM property setter/getter */
typedef struct {
    const char *name;   /* QOM property name (with the "x-" prefix) */
    BucketType type;    /* which throttle bucket the property addresses */
    enum {
        AVG,            /* average rate limit (bucket .avg) */
        MAX,            /* burst rate limit (bucket .max) */
        BURST_LENGTH,   /* burst length (bucket .burst_length) */
        IOPS_SIZE,      /* per-operation size for iops accounting (.op_size) */
    } category;
} ThrottleParamInfo;
600
/* Table mapping each QOM throttling property to the bucket and field
 * (average, max, burst length, or op size) that it controls. Used by
 * throttle_group_set()/throttle_group_get() via the opaque argument. */
static ThrottleParamInfo properties[] = {
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL,
        THROTTLE_OPS_TOTAL, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX,
        THROTTLE_OPS_TOTAL, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX_LENGTH,
        THROTTLE_OPS_TOTAL, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ,
        THROTTLE_OPS_READ, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX,
        THROTTLE_OPS_READ, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX_LENGTH,
        THROTTLE_OPS_READ, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE,
        THROTTLE_OPS_WRITE, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX,
        THROTTLE_OPS_WRITE, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX_LENGTH,
        THROTTLE_OPS_WRITE, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL,
        THROTTLE_BPS_TOTAL, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX,
        THROTTLE_BPS_TOTAL, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX_LENGTH,
        THROTTLE_BPS_TOTAL, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ,
        THROTTLE_BPS_READ, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX,
        THROTTLE_BPS_READ, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX_LENGTH,
        THROTTLE_BPS_READ, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE,
        THROTTLE_BPS_WRITE, AVG,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX,
        THROTTLE_BPS_WRITE, MAX,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX_LENGTH,
        THROTTLE_BPS_WRITE, BURST_LENGTH,
    },
    {
        THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_SIZE,
        0, IOPS_SIZE,
    }
};
679
680 /* This function edits throttle_groups and must be called under the global
681 * mutex */
682 static void throttle_group_obj_init(Object *obj)
683 {
684 ThrottleGroup *tg = THROTTLE_GROUP(obj);
685
686 tg->clock_type = QEMU_CLOCK_REALTIME;
687 if (qtest_enabled()) {
688 /* For testing block IO throttling only */
689 tg->clock_type = QEMU_CLOCK_VIRTUAL;
690 }
691 tg->is_initialized = false;
692 qemu_mutex_init(&tg->lock);
693 throttle_init(&tg->ts);
694 QLIST_INIT(&tg->head);
695 }
696
/* UserCreatable completion hook: validate the group's configuration and
 * publish it in the global list.
 *
 * This function edits throttle_groups and must be called under the global
 * mutex */
static void throttle_group_obj_complete(UserCreatable *obj, Error **errp)
{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;

    /* set group name to object id if it exists */
    if (!tg->name && tg->parent_obj.parent) {
        tg->name = object_get_canonical_path_component(OBJECT(obj));
    }
    /* We must have a group name at this point */
    assert(tg->name);

    /* error if name is duplicate */
    if (throttle_group_by_name(tg->name) != NULL) {
        error_setg(errp, "A group with this name already exists");
        return;
    }

    /* check validity of the limits set so far via individual properties */
    throttle_get_config(&tg->ts, &cfg);
    if (!throttle_is_valid(&cfg, errp)) {
        return;
    }
    /* Apply the validated config and make the group visible */
    throttle_config(&tg->ts, tg->clock_type, &cfg);
    QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
    tg->is_initialized = true;
}
726
727 /* This function edits throttle_groups and must be called under the global
728 * mutex */
729 static void throttle_group_obj_finalize(Object *obj)
730 {
731 ThrottleGroup *tg = THROTTLE_GROUP(obj);
732 if (tg->is_initialized) {
733 QTAILQ_REMOVE(&throttle_groups, tg, list);
734 }
735 qemu_mutex_destroy(&tg->lock);
736 g_free(tg->name);
737 }
738
739 static void throttle_group_set(Object *obj, Visitor *v, const char * name,
740 void *opaque, Error **errp)
741
742 {
743 ThrottleGroup *tg = THROTTLE_GROUP(obj);
744 ThrottleConfig *cfg;
745 ThrottleParamInfo *info = opaque;
746 Error *local_err = NULL;
747 int64_t value;
748
749 /* If we have finished initialization, don't accept individual property
750 * changes through QOM. Throttle configuration limits must be set in one
751 * transaction, as certain combinations are invalid.
752 */
753 if (tg->is_initialized) {
754 error_setg(&local_err, "Property cannot be set after initialization");
755 goto ret;
756 }
757
758 visit_type_int64(v, name, &value, &local_err);
759 if (local_err) {
760 goto ret;
761 }
762 if (value < 0) {
763 error_setg(&local_err, "Property values cannot be negative");
764 goto ret;
765 }
766
767 cfg = &tg->ts.cfg;
768 switch (info->category) {
769 case AVG:
770 cfg->buckets[info->type].avg = value;
771 break;
772 case MAX:
773 cfg->buckets[info->type].max = value;
774 break;
775 case BURST_LENGTH:
776 if (value > UINT_MAX) {
777 error_setg(&local_err, "%s value must be in the"
778 "range [0, %u]", info->name, UINT_MAX);
779 goto ret;
780 }
781 cfg->buckets[info->type].burst_length = value;
782 break;
783 case IOPS_SIZE:
784 cfg->op_size = value;
785 break;
786 }
787
788 ret:
789 error_propagate(errp, local_err);
790 return;
791
792 }
793
794 static void throttle_group_get(Object *obj, Visitor *v, const char *name,
795 void *opaque, Error **errp)
796 {
797 ThrottleGroup *tg = THROTTLE_GROUP(obj);
798 ThrottleConfig cfg;
799 ThrottleParamInfo *info = opaque;
800 int64_t value;
801
802 throttle_get_config(&tg->ts, &cfg);
803 switch (info->category) {
804 case AVG:
805 value = cfg.buckets[info->type].avg;
806 break;
807 case MAX:
808 value = cfg.buckets[info->type].max;
809 break;
810 case BURST_LENGTH:
811 value = cfg.buckets[info->type].burst_length;
812 break;
813 case IOPS_SIZE:
814 value = cfg.op_size;
815 break;
816 }
817
818 visit_type_int64(v, name, &value, errp);
819 }
820
/* QOM setter for the "limits" property: apply a full ThrottleLimits
 * structure to the group atomically.
 */
static void throttle_group_set_limits(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)

{
    ThrottleGroup *tg = THROTTLE_GROUP(obj);
    ThrottleConfig cfg;
    ThrottleLimits arg = { 0 };
    ThrottleLimits *argp = &arg;
    Error *local_err = NULL;

    visit_type_ThrottleLimits(v, name, &argp, &local_err);
    if (local_err) {
        goto ret;
    }
    /* Merge the new limits into the current config under the group lock */
    qemu_mutex_lock(&tg->lock);
    throttle_get_config(&tg->ts, &cfg);
    throttle_limits_to_config(argp, &cfg, &local_err);
    if (local_err) {
        goto unlock;
    }
    throttle_config(&tg->ts, tg->clock_type, &cfg);

unlock:
    qemu_mutex_unlock(&tg->lock);
ret:
    error_propagate(errp, local_err);
    return;
}
850
851 static void throttle_group_get_limits(Object *obj, Visitor *v,
852 const char *name, void *opaque,
853 Error **errp)
854 {
855 ThrottleGroup *tg = THROTTLE_GROUP(obj);
856 ThrottleConfig cfg;
857 ThrottleLimits arg = { 0 };
858 ThrottleLimits *argp = &arg;
859
860 qemu_mutex_lock(&tg->lock);
861 throttle_get_config(&tg->ts, &cfg);
862 qemu_mutex_unlock(&tg->lock);
863
864 throttle_config_to_limits(&cfg, argp);
865
866 visit_type_ThrottleLimits(v, name, &argp, errp);
867 }
868
/* A group may be deleted only when nothing else holds a reference to it
 * (refcount 1 means only the object system's own reference remains). */
static bool throttle_group_can_be_deleted(UserCreatable *uc)
{
    return OBJECT(uc)->ref == 1;
}
873
874 static void throttle_group_obj_class_init(ObjectClass *klass, void *class_data)
875 {
876 size_t i = 0;
877 UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
878
879 ucc->complete = throttle_group_obj_complete;
880 ucc->can_be_deleted = throttle_group_can_be_deleted;
881
882 /* individual properties */
883 for (i = 0; i < sizeof(properties) / sizeof(ThrottleParamInfo); i++) {
884 object_class_property_add(klass,
885 properties[i].name,
886 "int",
887 throttle_group_get,
888 throttle_group_set,
889 NULL, &properties[i],
890 &error_abort);
891 }
892
893 /* ThrottleLimits */
894 object_class_property_add(klass,
895 "limits", "ThrottleLimits",
896 throttle_group_get_limits,
897 throttle_group_set_limits,
898 NULL, NULL,
899 &error_abort);
900 }
901
/* QOM type definition: ThrottleGroup is user-creatable (e.g. via -object) */
static const TypeInfo throttle_group_info = {
    .name = TYPE_THROTTLE_GROUP,
    .parent = TYPE_OBJECT,
    .class_init = throttle_group_obj_class_init,
    .instance_size = sizeof(ThrottleGroup),
    .instance_init = throttle_group_obj_init,
    .instance_finalize = throttle_group_obj_finalize,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    },
};
914
/* Register the throttle group QOM type at program startup */
static void throttle_groups_init(void)
{
    type_register_static(&throttle_group_info);
}

type_init(throttle_groups_init);