]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/admin/datastore.rs
src/config/acl.rs: introduce more/better datastore privileges
[proxmox-backup.git] / src / api2 / admin / datastore.rs
1 use std::collections::{HashSet, HashMap};
2 use std::convert::TryFrom;
3
4 use chrono::{TimeZone, Local};
5 use anyhow::{bail, Error};
6 use futures::*;
7 use hyper::http::request::Parts;
8 use hyper::{header, Body, Response, StatusCode};
9 use serde_json::{json, Value};
10
11 use proxmox::api::{
12 api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
13 RpcEnvironment, RpcEnvironmentType, Permission};
14 use proxmox::api::router::SubdirMap;
15 use proxmox::api::schema::*;
16 use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
17 use proxmox::try_block;
18 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
19
20 use crate::api2::types::*;
21 use crate::backup::*;
22 use crate::config::datastore;
23 use crate::server::WorkerTask;
24 use crate::tools;
25 use crate::config::acl::{
26 PRIV_DATASTORE_AUDIT,
27 PRIV_DATASTORE_READ,
28 PRIV_DATASTORE_PRUNE,
29 PRIV_DATASTORE_CREATE_BACKUP,
30 };
31
32 fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
33
34 let mut path = store.base_path();
35 path.push(backup_dir.relative_path());
36 path.push("index.json.blob");
37
38 let raw_data = file_get_contents(&path)?;
39 let index_size = raw_data.len() as u64;
40 let blob = DataBlob::from_raw(raw_data)?;
41
42 let manifest = BackupManifest::try_from(blob)?;
43
44 let mut result = Vec::new();
45 for item in manifest.files() {
46 result.push(BackupContent {
47 filename: item.filename.clone(),
48 size: Some(item.size),
49 });
50 }
51
52 result.push(BackupContent {
53 filename: "index.json.blob".to_string(),
54 size: Some(index_size),
55 });
56
57 Ok(result)
58 }
59
60 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
61
62 let mut group_hash = HashMap::new();
63
64 for info in backup_list {
65 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
66 let time_list = group_hash.entry(group_id).or_insert(vec![]);
67 time_list.push(info);
68 }
69
70 group_hash
71 }
72
73 #[api(
74 input: {
75 properties: {
76 store: {
77 schema: DATASTORE_SCHEMA,
78 },
79 },
80 },
81 returns: {
82 type: Array,
83 description: "Returns the list of backup groups.",
84 items: {
85 type: GroupListItem,
86 }
87 },
88 access: {
89 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
90 },
91 )]
92 /// List backup groups.
93 fn list_groups(
94 store: String,
95 ) -> Result<Vec<GroupListItem>, Error> {
96
97 let datastore = DataStore::lookup_datastore(&store)?;
98
99 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
100
101 let group_hash = group_backups(backup_list);
102
103 let mut groups = Vec::new();
104
105 for (_group_id, mut list) in group_hash {
106
107 BackupInfo::sort_list(&mut list, false);
108
109 let info = &list[0];
110 let group = info.backup_dir.group();
111
112 let result_item = GroupListItem {
113 backup_type: group.backup_type().to_string(),
114 backup_id: group.backup_id().to_string(),
115 last_backup: info.backup_dir.backup_time().timestamp(),
116 backup_count: list.len() as u64,
117 files: info.files.clone(),
118 };
119 groups.push(result_item);
120 }
121
122 Ok(groups)
123 }
124
125 #[api(
126 input: {
127 properties: {
128 store: {
129 schema: DATASTORE_SCHEMA,
130 },
131 "backup-type": {
132 schema: BACKUP_TYPE_SCHEMA,
133 },
134 "backup-id": {
135 schema: BACKUP_ID_SCHEMA,
136 },
137 "backup-time": {
138 schema: BACKUP_TIME_SCHEMA,
139 },
140 },
141 },
142 returns: {
143 type: Array,
144 description: "Returns the list of archive files inside a backup snapshots.",
145 items: {
146 type: BackupContent,
147 }
148 },
149 access: {
150 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
151 },
152 )]
153 /// List snapshot files.
154 pub fn list_snapshot_files(
155 store: String,
156 backup_type: String,
157 backup_id: String,
158 backup_time: i64,
159 _info: &ApiMethod,
160 _rpcenv: &mut dyn RpcEnvironment,
161 ) -> Result<Vec<BackupContent>, Error> {
162
163 let datastore = DataStore::lookup_datastore(&store)?;
164 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
165
166 let mut files = read_backup_index(&datastore, &snapshot)?;
167
168 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
169
170 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
171 acc.insert(item.filename.clone());
172 acc
173 });
174
175 for file in info.files {
176 if file_set.contains(&file) { continue; }
177 files.push(BackupContent { filename: file, size: None });
178 }
179
180 Ok(files)
181 }
182
183 #[api(
184 input: {
185 properties: {
186 store: {
187 schema: DATASTORE_SCHEMA,
188 },
189 "backup-type": {
190 schema: BACKUP_TYPE_SCHEMA,
191 },
192 "backup-id": {
193 schema: BACKUP_ID_SCHEMA,
194 },
195 "backup-time": {
196 schema: BACKUP_TIME_SCHEMA,
197 },
198 },
199 },
200 access: {
201 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_PRUNE, false),
202 },
203 )]
204 /// Delete backup snapshot.
205 fn delete_snapshot(
206 store: String,
207 backup_type: String,
208 backup_id: String,
209 backup_time: i64,
210 _info: &ApiMethod,
211 _rpcenv: &mut dyn RpcEnvironment,
212 ) -> Result<Value, Error> {
213
214 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
215
216 let datastore = DataStore::lookup_datastore(&store)?;
217
218 datastore.remove_backup_dir(&snapshot)?;
219
220 Ok(Value::Null)
221 }
222
223 #[api(
224 input: {
225 properties: {
226 store: {
227 schema: DATASTORE_SCHEMA,
228 },
229 "backup-type": {
230 optional: true,
231 schema: BACKUP_TYPE_SCHEMA,
232 },
233 "backup-id": {
234 optional: true,
235 schema: BACKUP_ID_SCHEMA,
236 },
237 },
238 },
239 returns: {
240 type: Array,
241 description: "Returns the list of snapshots.",
242 items: {
243 type: SnapshotListItem,
244 }
245 },
246 access: {
247 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
248 },
249 )]
250 /// List backup snapshots.
251 pub fn list_snapshots (
252 param: Value,
253 _info: &ApiMethod,
254 _rpcenv: &mut dyn RpcEnvironment,
255 ) -> Result<Vec<SnapshotListItem>, Error> {
256
257 let store = tools::required_string_param(&param, "store")?;
258 let backup_type = param["backup-type"].as_str();
259 let backup_id = param["backup-id"].as_str();
260
261 let datastore = DataStore::lookup_datastore(store)?;
262
263 let base_path = datastore.base_path();
264
265 let backup_list = BackupInfo::list_backups(&base_path)?;
266
267 let mut snapshots = vec![];
268
269 for info in backup_list {
270 let group = info.backup_dir.group();
271 if let Some(backup_type) = backup_type {
272 if backup_type != group.backup_type() { continue; }
273 }
274 if let Some(backup_id) = backup_id {
275 if backup_id != group.backup_id() { continue; }
276 }
277
278 let mut result_item = SnapshotListItem {
279 backup_type: group.backup_type().to_string(),
280 backup_id: group.backup_id().to_string(),
281 backup_time: info.backup_dir.backup_time().timestamp(),
282 files: info.files,
283 size: None,
284 };
285
286 if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
287 let mut backup_size = 0;
288 for item in index.iter() {
289 if let Some(item_size) = item.size {
290 backup_size += item_size;
291 }
292 }
293 result_item.size = Some(backup_size);
294 }
295
296 snapshots.push(result_item);
297 }
298
299 Ok(snapshots)
300 }
301
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: StorageStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Get datastore status.
///
/// Reports total/used/available bytes of the filesystem backing the
/// datastore's base path, obtained via `statfs64(2)`.
pub fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<StorageStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    // SAFETY: statfs64 is a plain out-parameter struct of integer fields;
    // an all-zero bit pattern is a valid initial value for it.
    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

    use nix::NixPath;

    // SAFETY: with_nix_path hands us a valid NUL-terminated C string, and
    // `stat` outlives the call.
    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
    nix::errno::Errno::result(res)?; // turn a -1/errno return into an Error

    let bsize = stat.f_bsize as u64;

    // All statfs counters are in units of f_bsize blocks; convert to bytes.
    Ok(StorageStatus {
        total: stat.f_blocks*bsize,
        // used = total blocks minus free blocks (f_bfree includes the
        // root-reserved blocks, unlike f_bavail below)
        used: (stat.f_blocks-stat.f_bfree)*bsize,
        avail: stat.f_bavail*bsize,
    })
}
343
#[macro_export]
// NOTE(review): "prameters" is a typo for "parameters", but the macro is
// exported via #[macro_export] and may be invoked outside this file, so
// renaming it would be a breaking change.
/// Splice the common `keep-*` prune option schemas into a parameter list.
///
/// Accepts one or two bracketed lists of `(name, optional, schema)`
/// tuples; the first list is emitted before the keep options, the
/// optional second list after them.
macro_rules! add_common_prune_prameters {
    // Single-list form: delegate to the two-list form with an empty tail.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &IntegerSchema::new("Number of daily backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-hourly",
                true,
                &IntegerSchema::new("Number of hourly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-last",
                true,
                &IntegerSchema::new("Number of backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-monthly",
                true,
                &IntegerSchema::new("Number of monthly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-weekly",
                true,
                &IntegerSchema::new("Number of weekly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-yearly",
                true,
                &IntegerSchema::new("Number of yearly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            $( $list2 )*
        ]
    }
}
398
// Hand-rolled ApiMethod (instead of the #[api] macro) because the
// parameter list is assembled with the add_common_prune_prameters! macro.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    )
).access(None, &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_PRUNE, false));
415
416 fn prune(
417 param: Value,
418 _info: &ApiMethod,
419 _rpcenv: &mut dyn RpcEnvironment,
420 ) -> Result<Value, Error> {
421
422 let store = param["store"].as_str().unwrap();
423
424 let backup_type = tools::required_string_param(&param, "backup-type")?;
425 let backup_id = tools::required_string_param(&param, "backup-id")?;
426
427 let dry_run = param["dry-run"].as_bool().unwrap_or(false);
428
429 let group = BackupGroup::new(backup_type, backup_id);
430
431 let datastore = DataStore::lookup_datastore(store)?;
432
433 let prune_options = PruneOptions {
434 keep_last: param["keep-last"].as_u64(),
435 keep_hourly: param["keep-hourly"].as_u64(),
436 keep_daily: param["keep-daily"].as_u64(),
437 keep_weekly: param["keep-weekly"].as_u64(),
438 keep_monthly: param["keep-monthly"].as_u64(),
439 keep_yearly: param["keep-yearly"].as_u64(),
440 };
441
442 let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
443
444 let mut prune_result = Vec::new();
445
446 let list = group.list_backups(&datastore.base_path())?;
447
448 let mut prune_info = compute_prune_info(list, &prune_options)?;
449
450 prune_info.reverse(); // delete older snapshots first
451
452 let keep_all = !prune_options.keeps_something();
453
454 if dry_run {
455 for (info, mut keep) in prune_info {
456 if keep_all { keep = true; }
457
458 let backup_time = info.backup_dir.backup_time();
459 let group = info.backup_dir.group();
460
461 prune_result.push(json!({
462 "backup-type": group.backup_type(),
463 "backup-id": group.backup_id(),
464 "backup-time": backup_time.timestamp(),
465 "keep": keep,
466 }));
467 }
468 return Ok(json!(prune_result));
469 }
470
471
472 // We use a WorkerTask just to have a task log, but run synchrounously
473 let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
474
475 let result = try_block! {
476 if keep_all {
477 worker.log("No prune selection - keeping all files.");
478 } else {
479 worker.log(format!("retention options: {}", prune_options.cli_options_string()));
480 worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
481 store, backup_type, backup_id));
482 }
483
484 for (info, mut keep) in prune_info {
485 if keep_all { keep = true; }
486
487 let backup_time = info.backup_dir.backup_time();
488 let timestamp = BackupDir::backup_time_to_string(backup_time);
489 let group = info.backup_dir.group();
490
491
492 let msg = format!(
493 "{}/{}/{} {}",
494 group.backup_type(),
495 group.backup_id(),
496 timestamp,
497 if keep { "keep" } else { "remove" },
498 );
499
500 worker.log(msg);
501
502 prune_result.push(json!({
503 "backup-type": group.backup_type(),
504 "backup-id": group.backup_id(),
505 "backup-time": backup_time.timestamp(),
506 "keep": keep,
507 }));
508
509 if !(dry_run || keep) {
510 datastore.remove_backup_dir(&info.backup_dir)?;
511 }
512 }
513
514 Ok(())
515 };
516
517 worker.log_result(&result);
518
519 if let Err(err) = result {
520 bail!("prune failed - {}", err);
521 };
522
523 Ok(json!(prune_result))
524 }
525
526 #[api(
527 input: {
528 properties: {
529 store: {
530 schema: DATASTORE_SCHEMA,
531 },
532 },
533 },
534 returns: {
535 schema: UPID_SCHEMA,
536 },
537 access: {
538 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_PRUNE, false),
539 },
540 )]
541 /// Start garbage collection.
542 fn start_garbage_collection(
543 store: String,
544 _info: &ApiMethod,
545 rpcenv: &mut dyn RpcEnvironment,
546 ) -> Result<Value, Error> {
547
548 let datastore = DataStore::lookup_datastore(&store)?;
549
550 println!("Starting garbage collection on store {}", store);
551
552 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
553
554 let upid_str = WorkerTask::new_thread(
555 "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
556 {
557 worker.log(format!("starting garbage collection on store {}", store));
558 datastore.garbage_collection(worker)
559 })?;
560
561 Ok(json!(upid_str))
562 }
563
564 #[api(
565 input: {
566 properties: {
567 store: {
568 schema: DATASTORE_SCHEMA,
569 },
570 },
571 },
572 returns: {
573 type: GarbageCollectionStatus,
574 },
575 access: {
576 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
577 },
578 )]
579 /// Garbage collection status.
580 pub fn garbage_collection_status(
581 store: String,
582 _info: &ApiMethod,
583 _rpcenv: &mut dyn RpcEnvironment,
584 ) -> Result<GarbageCollectionStatus, Error> {
585
586 let datastore = DataStore::lookup_datastore(&store)?;
587
588 let status = datastore.last_gc_status();
589
590 Ok(status)
591 }
592
593 #[api(
594 access: {
595 permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_AUDIT, false),
596 },
597 )]
598 /// Datastore list
599 fn get_datastore_list(
600 _param: Value,
601 _info: &ApiMethod,
602 _rpcenv: &mut dyn RpcEnvironment,
603 ) -> Result<Value, Error> {
604
605 let (config, _digest) = datastore::config()?;
606
607 Ok(config.convert_to_array("store", None, &[]))
608 }
609
#[sortable]
// Hand-rolled ApiMethod: #[api] cannot express an AsyncHttp handler that
// streams a raw response body.
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ, false));
624
/// Stream a single raw file out of a backup snapshot as an
/// `application/octet-stream` HTTP response.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;

        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
                 backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // <datastore base>/<snapshot relative path>/<file-name>
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(path)
            .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
            .await?;

        // Stream the file in chunks instead of buffering it in memory.
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
669
670 #[sortable]
671 pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
672 &ApiHandler::AsyncHttp(&upload_backup_log),
673 &ObjectSchema::new(
674 "Download single raw file from backup snapshot.",
675 &sorted!([
676 ("store", false, &DATASTORE_SCHEMA),
677 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
678 ("backup-id", false, &BACKUP_ID_SCHEMA),
679 ("backup-time", false, &BACKUP_TIME_SCHEMA),
680 ]),
681 )
682 ).access(None, &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_CREATE_BACKUP, false));
683
/// Accept an uploaded `client.log.blob` for an existing backup snapshot.
///
/// Fails if the snapshot already contains a log. The blob's CRC is
/// verified server-side before the file is written.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;

        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = "client.log.blob";

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // A snapshot's log may only be uploaded once.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

        // Collect the chunked request body into a single buffer.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        let blob = DataBlob::from_raw(data)?;
        // always verify CRC at server side
        blob.verify_crc()?;
        let raw_data = blob.raw_data();
        replace_file(&path, raw_data, CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
734
#[sortable]
// Per-datastore API endpoints, mounted under the "{store}" match below.
// Entries must stay sorted by subdir name (checked via #[sortable]).
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
];
780
// Router for a single datastore: GET returns the subdir index, and each
// subdir from DATASTORE_INFO_SUBDIRS is routed beneath it.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
784
785
// Top-level router: GET lists all datastores; everything else is matched
// against a concrete "{store}" and delegated to DATASTORE_INFO_ROUTER.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);