From 26891c19f68a14927e8c52417112d80668be1b3a Mon Sep 17 00:00:00 2001
From: Dietmar Maurer <dietmar@proxmox.com>
Date: Tue, 13 Nov 2012 10:03:52 +0100
Subject: [PATCH v3 2/6] add basic backup support to block driver

Function backup_job_create() creates a block job to back up a block device.
The coroutine is started with backup_job_start().

We call backup_do_cow() for each write during backup. That function
reads the original data and passes it to backup_dump_cb().

The tracked_request infrastructure is used to serialize access.

Currently the backup cluster size is hardcoded to 65536 bytes.

Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
---
 Makefile.objs |   1 +
 backup.c      | 335 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 backup.h      |  32 ++++++
 block.c       |  71 +++++++++++-
 block.h       |   2 +
 blockjob.h    |  10 ++
 6 files changed, 445 insertions(+), 6 deletions(-)
 create mode 100644 backup.c
 create mode 100644 backup.h

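As a usage illustration (not part of the patch): a caller registers a dump callback plus a
completion callback with backup_job_create() and then starts the coroutine with
backup_job_start(), exactly as described in the commit message above. The sketch below assumes
only the backup.h interface added by this patch; the helpers my_dump_cluster,
my_backup_complete and start_backup_example as well as the flat output file are hypothetical
(error handling is elided).

    #include <stdio.h>

    #include "block.h"
    #include "backup.h"

    /* Hypothetical dump callback: write each 64 KiB cluster handed over by
     * backup_do_cow() to its offset in a flat output file. */
    static int my_dump_cluster(void *opaque, BlockDriverState *bs,
                               int64_t cluster_num, unsigned char *buf)
    {
        FILE *fp = opaque;

        if (fseeko(fp, cluster_num * BACKUP_CLUSTER_SIZE, SEEK_SET) != 0 ||
            fwrite(buf, BACKUP_CLUSTER_SIZE, 1, fp) != 1) {
            return -1;
        }
        return 0;
    }

    /* Hypothetical completion callback: close the output file. */
    static void my_backup_complete(void *opaque, int ret)
    {
        fclose(opaque);
    }

    static void start_backup_example(BlockDriverState *bs)
    {
        FILE *fp = fopen("backup.raw", "wb");

        if (fp && backup_job_create(bs, my_dump_cluster, my_backup_complete,
                                    fp, 0 /* no speed limit */) == 0) {
            backup_job_start(bs);
        }
    }
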
diff --git a/Makefile.objs b/Makefile.objs
index 3c7abca..cb46be5 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -48,6 +48,7 @@ coroutine-obj-$(CONFIG_WIN32) += coroutine-win32.o
 block-obj-y = iov.o cache-utils.o qemu-option.o module.o async.o
 block-obj-y += nbd.o block.o blockjob.o aes.o qemu-config.o
 block-obj-y += thread-pool.o qemu-progress.o qemu-sockets.o uri.o notify.o
+block-obj-y += backup.o
 block-obj-y += $(coroutine-obj-y) $(qobject-obj-y) $(version-obj-y)
 block-obj-$(CONFIG_POSIX) += event_notifier-posix.o aio-posix.o
 block-obj-$(CONFIG_WIN32) += event_notifier-win32.o aio-win32.o
diff --git a/backup.c b/backup.c
new file mode 100644
index 0000000..3ccb74c
--- /dev/null
+++ b/backup.c
@@ -0,0 +1,335 @@
+/*
+ * QEMU backup
+ *
+ * Copyright (C) 2012 Proxmox Server Solutions
+ *
+ * Authors:
+ *  Dietmar Maurer (dietmar@proxmox.com)
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include "block.h"
+#include "block_int.h"
+#include "blockjob.h"
+#include "qemu/ratelimit.h"
+#include "backup.h"
+
+#define DEBUG_BACKUP 0
+
+#define DPRINTF(fmt, ...) \
+    do { if (DEBUG_BACKUP) { printf("backup: " fmt, ## __VA_ARGS__); } } \
+    while (0)
+
+
+#define BITS_PER_LONG (sizeof(unsigned long) * 8)
+#define SLICE_TIME 100000000ULL /* ns */
+
+typedef struct BackupBlockJob {
+    BlockJob common;
+    RateLimit limit;
+    uint64_t sectors_read;
+    unsigned long *bitmap;
+    int bitmap_size;
+    BackupDumpFunc *backup_dump_cb;
+    BlockDriverCompletionFunc *backup_complete_cb;
+    void *opaque;
+} BackupBlockJob;
+
+static int backup_get_bitmap(BackupBlockJob *job, int64_t cluster_num)
+{
+    assert(job);
+    assert(job->bitmap);
+
+    unsigned long val, idx, bit;
+
+    idx = cluster_num / BITS_PER_LONG;
+
+    assert(job->bitmap_size > idx);
+
+    bit = cluster_num % BITS_PER_LONG;
+    val = job->bitmap[idx];
+
+    return !!(val & (1UL << bit));
+}
+
+static void backup_set_bitmap(BackupBlockJob *job, int64_t cluster_num,
+                              int dirty)
+{
+    assert(job);
+    assert(job->bitmap);
+
+    unsigned long val, idx, bit;
+
+    idx = cluster_num / BITS_PER_LONG;
+
+    assert(job->bitmap_size > idx);
+
+    bit = cluster_num % BITS_PER_LONG;
+    val = job->bitmap[idx];
+    if (dirty) {
+        if (!(val & (1UL << bit))) {
+            val |= 1UL << bit;
+        }
+    } else {
+        if (val & (1UL << bit)) {
+            val &= ~(1UL << bit);
+        }
+    }
+    job->bitmap[idx] = val;
+}
+
+static int backup_in_progress_count;
+
+static int coroutine_fn backup_do_cow(BlockDriverState *bs,
+                                      int64_t sector_num, int nb_sectors)
+{
+    assert(bs);
+    BackupBlockJob *job = (BackupBlockJob *)bs->job;
+    assert(job);
+
+    BlockDriver *drv = bs->drv;
+    struct iovec iov;
+    QEMUIOVector bounce_qiov;
+    void *bounce_buffer = NULL;
+    int ret = 0;
+
+    backup_in_progress_count++;
+
+    int64_t start, end;
+
+    start = sector_num / BACKUP_BLOCKS_PER_CLUSTER;
+    end = (sector_num + nb_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
+        BACKUP_BLOCKS_PER_CLUSTER;
+
+    DPRINTF("brdv_co_backup_cow enter %s C%zd %zd %d\n",
+            bdrv_get_device_name(bs), start, sector_num, nb_sectors);
+
+    for (; start < end; start++) {
+        if (backup_get_bitmap(job, start)) {
+            DPRINTF("brdv_co_backup_cow skip C%zd\n", start);
+            continue; /* already copied */
+        }
+
+        /* immediately set bitmap (avoid coroutine race) */
+        backup_set_bitmap(job, start, 1);
+
+        DPRINTF("brdv_co_backup_cow C%zd\n", start);
+
+        if (!bounce_buffer) {
+            iov.iov_len = BACKUP_CLUSTER_SIZE;
+            iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
+            qemu_iovec_init_external(&bounce_qiov, &iov, 1);
+        }
+
+        ret = drv->bdrv_co_readv(bs, start * BACKUP_BLOCKS_PER_CLUSTER,
+                                 BACKUP_BLOCKS_PER_CLUSTER,
+                                 &bounce_qiov);
+
+        job->sectors_read += BACKUP_BLOCKS_PER_CLUSTER;
+
+        if (ret < 0) {
+            DPRINTF("brdv_co_backup_cow bdrv_read C%zd failed\n", start);
+            goto out;
+        }
+
+        ret = job->backup_dump_cb(job->opaque, bs, start, bounce_buffer);
+        if (ret < 0) {
+            DPRINTF("brdv_co_backup_cow dump_cluster_cb C%zd failed\n", start);
+            goto out;
+        }
+
+        DPRINTF("brdv_co_backup_cow done C%zd\n", start);
+    }
+
+out:
+    if (bounce_buffer) {
+        qemu_vfree(bounce_buffer);
+    }
+
+    backup_in_progress_count--;
+
+    return ret;
+}
+
+static int coroutine_fn backup_before_read(BlockDriverState *bs,
+                                           int64_t sector_num,
+                                           int nb_sectors, QEMUIOVector *qiov)
+{
+    return backup_do_cow(bs, sector_num, nb_sectors);
+}
+
+static int coroutine_fn backup_before_write(BlockDriverState *bs,
+                                            int64_t sector_num,
+                                            int nb_sectors, QEMUIOVector *qiov)
+{
+    return backup_do_cow(bs, sector_num, nb_sectors);
+}
+
+static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
+{
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+
+    if (speed < 0) {
+        error_set(errp, QERR_INVALID_PARAMETER, "speed");
+        return;
+    }
+    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
+}
+
+static BlockJobType backup_job_type = {
+    .instance_size = sizeof(BackupBlockJob),
+    .before_read = backup_before_read,
+    .before_write = backup_before_write,
+    .job_type = "backup",
+    .set_speed = backup_set_speed,
+};
+
+static void coroutine_fn backup_run(void *opaque)
+{
+    BackupBlockJob *job = opaque;
+    BlockDriverState *bs = job->common.bs;
+    assert(bs);
+
+    int64_t start, end;
+
+    start = 0;
+    end = (bs->total_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
+        BACKUP_BLOCKS_PER_CLUSTER;
+
+    DPRINTF("backup_run start %s %zd %zd\n", bdrv_get_device_name(bs),
+            start, end);
+
+    int ret = 0;
+
+    for (; start < end; start++) {
+        if (block_job_is_cancelled(&job->common)) {
+            ret = -1;
+            break;
+        }
+
+        /* we need to yield so that qemu_aio_flush() returns.
+         * (without it, the VM does not reboot)
+         * Note: use 1000 instead of 0 (0 prioritizes this task too much)
+         */
+        if (job->common.speed) {
+            uint64_t delay_ns = ratelimit_calculate_delay(
+                &job->limit, job->sectors_read);
+            job->sectors_read = 0;
+            block_job_sleep_ns(&job->common, rt_clock, delay_ns);
+        } else {
+            block_job_sleep_ns(&job->common, rt_clock, 1000);
+        }
+
+        if (block_job_is_cancelled(&job->common)) {
+            ret = -1;
+            break;
+        }
+
+        if (backup_get_bitmap(job, start)) {
+            continue; /* already copied */
+        }
+
+        DPRINTF("backup_run loop C%zd\n", start);
+
+        /**
+         * This triggers a cluster copy
+         * Note: avoid direct call to brdv_co_backup_cow, because
+         * this does not call tracked_request_begin()
+         */
+        ret = bdrv_co_backup(bs, start * BACKUP_BLOCKS_PER_CLUSTER, 1);
+        if (ret < 0) {
+            break;
+        }
+        /* Publish progress */
+        job->common.offset += BACKUP_CLUSTER_SIZE;
+    }
+
+    while (backup_in_progress_count > 0) {
+        DPRINTF("backup_run backup_in_progress_count != 0 (%d)",
+                backup_in_progress_count);
+        block_job_sleep_ns(&job->common, rt_clock, 10000);
+
+    }
+
+    DPRINTF("backup_run complete %d\n", ret);
+    block_job_completed(&job->common, ret);
+}
+
+static void backup_job_cleanup_cb(void *opaque, int ret)
+{
+    BlockDriverState *bs = opaque;
+    assert(bs);
+    BackupBlockJob *job = (BackupBlockJob *)bs->job;
+    assert(job);
+
+    DPRINTF("backup_job_cleanup_cb start %d\n", ret);
+
+    job->backup_complete_cb(job->opaque, ret);
+
+    DPRINTF("backup_job_cleanup_cb end\n");
+
+    g_free(job->bitmap);
+}
+
+void
+backup_job_start(BlockDriverState *bs)
+{
+    assert(bs);
+    assert(bs->job);
+    assert(bs->job->co == NULL);
+
+    bs->job->co = qemu_coroutine_create(backup_run);
+    qemu_coroutine_enter(bs->job->co, bs->job);
+}
+
+int
+backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
+                  BlockDriverCompletionFunc *backup_complete_cb,
+                  void *opaque, int64_t speed)
+{
+    assert(bs);
+    assert(backup_dump_cb);
+    assert(backup_complete_cb);
+
+    if (bs->job) {
+        DPRINTF("bdrv_backup_init failed - running job on %s\n",
+                bdrv_get_device_name(bs));
+        return -1;
+    }
+
+    int64_t bitmap_size;
+    const char *devname = bdrv_get_device_name(bs);
+
+    if (!devname || !devname[0]) {
+        return -1;
+    }
+
+    DPRINTF("bdrv_backup_init %s\n", bdrv_get_device_name(bs));
+
+    Error *errp;
+    BackupBlockJob *job = block_job_create(&backup_job_type, bs, speed,
+                                           backup_job_cleanup_cb, bs, &errp);
+
+    job->common.cluster_size = BACKUP_CLUSTER_SIZE;
+
+    bitmap_size = bs->total_sectors +
+        BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG - 1;
+    bitmap_size /= BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG;
+
+    job->backup_dump_cb = backup_dump_cb;
+    job->backup_complete_cb = backup_complete_cb;
+    job->opaque = opaque;
+    job->bitmap_size = bitmap_size;
+    job->bitmap = g_new0(unsigned long, bitmap_size);
+
+    job->common.len = bs->total_sectors * BDRV_SECTOR_SIZE;
+
+    return 0;
+}
diff --git a/backup.h b/backup.h
new file mode 100644
index 0000000..a5f85e6
--- /dev/null
+++ b/backup.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU backup related definitions
+ *
+ * Copyright (C) Proxmox Server Solutions
+ *
+ * Authors:
+ *  Dietmar Maurer (dietmar@proxmox.com)
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_BACKUP_H
+#define QEMU_BACKUP_H
+
+#include <uuid/uuid.h>
+
+#define BACKUP_CLUSTER_BITS 16
+#define BACKUP_CLUSTER_SIZE (1<<BACKUP_CLUSTER_BITS)
+#define BACKUP_BLOCKS_PER_CLUSTER (BACKUP_CLUSTER_SIZE/BDRV_SECTOR_SIZE)
+
+typedef int BackupDumpFunc(void *opaque, BlockDriverState *bs,
+                           int64_t cluster_num, unsigned char *buf);
+
+void backup_job_start(BlockDriverState *bs);
+
+int backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
+                      BlockDriverCompletionFunc *backup_complete_cb,
+                      void *opaque, int64_t speed);
+
+#endif /* QEMU_BACKUP_H */
diff --git a/block.c b/block.c
index c05875f..4de7fbd 100644
--- a/block.c
+++ b/block.c
@@ -54,6 +54,7 @@
 typedef enum {
     BDRV_REQ_COPY_ON_READ = 0x1,
     BDRV_REQ_ZERO_WRITE = 0x2,
+    BDRV_REQ_BACKUP_ONLY = 0x4,
 } BdrvRequestFlags;
 
 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
@@ -1542,7 +1543,7 @@ int bdrv_commit(BlockDriverState *bs)
 
     if (!drv)
         return -ENOMEDIUM;
-
+
     if (!bs->backing_hd) {
         return -ENOTSUP;
     }
@@ -1679,6 +1680,22 @@ static void round_to_clusters(BlockDriverState *bs,
     }
 }
 
+/**
+ * Round a region to job cluster boundaries
+ */
+static void round_to_job_clusters(BlockDriverState *bs,
+                                  int64_t sector_num, int nb_sectors,
+                                  int job_cluster_size,
+                                  int64_t *cluster_sector_num,
+                                  int *cluster_nb_sectors)
+{
+    int64_t c = job_cluster_size/BDRV_SECTOR_SIZE;
+
+    *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
+    *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
+                                        nb_sectors, c);
+}
+
 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                      int64_t sector_num, int nb_sectors) {
     /*        aaaa   bbbb */
@@ -1693,7 +1710,9 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
 }
 
 static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
-        int64_t sector_num, int nb_sectors)
+                                                       int64_t sector_num,
+                                                       int nb_sectors,
+                                                       int job_cluster_size)
 {
     BdrvTrackedRequest *req;
     int64_t cluster_sector_num;
@@ -1709,6 +1728,11 @@ static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
     round_to_clusters(bs, sector_num, nb_sectors,
                       &cluster_sector_num, &cluster_nb_sectors);
 
+    if (job_cluster_size) {
+        round_to_job_clusters(bs, sector_num, nb_sectors, job_cluster_size,
+                              &cluster_sector_num, &cluster_nb_sectors);
+    }
+
     do {
         retry = false;
         QLIST_FOREACH(req, &bs->tracked_requests, list) {
@@ -2278,12 +2302,24 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
         bs->copy_on_read_in_flight++;
     }
 
-    if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
+    int job_cluster_size = bs->job && bs->job->cluster_size ?
+        bs->job->cluster_size : 0;
+
+    if (bs->copy_on_read_in_flight || job_cluster_size) {
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors,
+                                      job_cluster_size);
     }
 
     tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
 
+    if (bs->job && bs->job->job_type->before_read) {
+        ret = bs->job->job_type->before_read(bs, sector_num, nb_sectors, qiov);
+        if ((ret < 0) || (flags & BDRV_REQ_BACKUP_ONLY)) {
+            /* Note: We do not return any data to the caller */
+            goto out;
+        }
+    }
+
     if (flags & BDRV_REQ_COPY_ON_READ) {
         int pnum;
 
@@ -2327,6 +2363,17 @@ int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
                             BDRV_REQ_COPY_ON_READ);
 }
 
+int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
+                                int64_t sector_num, int nb_sectors)
+{
+    if (!bs->job) {
+        return -ENOTSUP;
+    }
+
+    return bdrv_co_do_readv(bs, sector_num, nb_sectors, NULL,
+                            BDRV_REQ_BACKUP_ONLY);
+}
+
 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors)
 {
@@ -2384,12 +2431,23 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         bdrv_io_limits_intercept(bs, true, nb_sectors);
     }
 
-    if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
+    int job_cluster_size = bs->job && bs->job->cluster_size ?
+        bs->job->cluster_size : 0;
+
+    if (bs->copy_on_read_in_flight || job_cluster_size) {
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors,
+                                      job_cluster_size);
     }
 
     tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
 
+    if (bs->job && bs->job->job_type->before_write) {
+        ret = bs->job->job_type->before_write(bs, sector_num, nb_sectors, qiov);
+        if (ret < 0) {
+            goto out;
+        }
+    }
+
     if (flags & BDRV_REQ_ZERO_WRITE) {
         ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
     } else {
@@ -2408,6 +2466,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         bs->wr_highest_sector = sector_num + nb_sectors - 1;
     }
 
+out:
     tracked_request_end(&req);
 
     return ret;
diff --git a/block.h b/block.h
index 722c620..94e5903 100644
--- a/block.h
+++ b/block.h
@@ -172,6 +172,8 @@ int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
     int nb_sectors, QEMUIOVector *qiov);
 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
+    int64_t sector_num, int nb_sectors);
 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
     int nb_sectors, QEMUIOVector *qiov);
 /*
diff --git a/blockjob.h b/blockjob.h
index 3792b73..6621173 100644
--- a/blockjob.h
+++ b/blockjob.h
@@ -50,6 +50,13 @@ typedef struct BlockJobType {
      * manually.
      */
     void (*complete)(BlockJob *job, Error **errp);
+
+    /** tracked requests */
+    int coroutine_fn (*before_read)(BlockDriverState *bs, int64_t sector_num,
+                                    int nb_sectors, QEMUIOVector *qiov);
+    int coroutine_fn (*before_write)(BlockDriverState *bs, int64_t sector_num,
+                                     int nb_sectors, QEMUIOVector *qiov);
+
 } BlockJobType;
 
 /**
@@ -103,6 +110,9 @@ struct BlockJob {
     /** Speed that was set with @block_job_set_speed. */
     int64_t speed;
 
+    /** tracked requests */
+    int cluster_size;
+
     /** The completion function that will be called when the job completes. */
     BlockDriverCompletionFunc *cb;
 
-- 
1.7.2.5

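For reference, a standalone sketch of the cluster geometry used above: with 512-byte sectors a
64 KiB backup cluster covers 128 sectors, the bitmap in backup_job_create() allocates one bit
per cluster rounded up to whole longs, and round_to_job_clusters() widens guest requests to
cluster boundaries before serialization. The 10 GiB disk, the sample write request and the
64-bit long size are assumptions for the example, not values taken from the patch.

    /* Sketch of the arithmetic behind BACKUP_BLOCKS_PER_CLUSTER, the bitmap
     * sizing in backup_job_create() and the rounding in round_to_job_clusters().
     * Assumes 512-byte sectors and 64-bit longs; disk size and request are examples. */
    #include <stdio.h>
    #include <stdint.h>

    #define BDRV_SECTOR_SIZE 512
    #define BACKUP_CLUSTER_BITS 16
    #define BACKUP_CLUSTER_SIZE (1 << BACKUP_CLUSTER_BITS)                     /* 65536 bytes */
    #define BACKUP_BLOCKS_PER_CLUSTER (BACKUP_CLUSTER_SIZE / BDRV_SECTOR_SIZE) /* 128 sectors */
    #define BITS_PER_LONG (sizeof(unsigned long) * 8)

    int main(void)
    {
        int64_t total_sectors = 10LL * 1024 * 1024 * 1024 / BDRV_SECTOR_SIZE;

        /* one bit per cluster, rounded up to whole longs (as in backup_job_create) */
        int64_t bitmap_size =
            (total_sectors + BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG - 1) /
            (BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG);

        printf("10 GiB disk: %lld clusters, bitmap = %lld longs (%lld bytes)\n",
               (long long)(total_sectors / BACKUP_BLOCKS_PER_CLUSTER),
               (long long)bitmap_size,
               (long long)(bitmap_size * sizeof(unsigned long)));

        /* how a 10-sector guest write at sector 300 is widened to job cluster
         * boundaries (what round_to_job_clusters() computes) */
        int64_t c = BACKUP_CLUSTER_SIZE / BDRV_SECTOR_SIZE;
        int64_t sector_num = 300, nb_sectors = 10;
        int64_t cluster_sector_num = (sector_num / c) * c;  /* QEMU_ALIGN_DOWN -> 256 */
        int64_t cluster_nb_sectors =
            ((sector_num - cluster_sector_num + nb_sectors + c - 1) / c) * c; /* QEMU_ALIGN_UP -> 128 */

        printf("write [%lld, +%lld) -> cow range [%lld, +%lld)\n",
               (long long)sector_num, (long long)nb_sectors,
               (long long)cluster_sector_num, (long long)cluster_nb_sectors);
        return 0;
    }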