]> git.proxmox.com Git - ceph.git/blame - ceph/src/cephadm/box/osd.py
update ceph source to reef 18.1.2
[ceph.git] / ceph / src / cephadm / box / osd.py
CommitLineData
20effc67
TL
1import json
2import os
1e59de90
TL
3import time
4import re
20effc67
TL
5from typing import Dict
6
33c7a0ef 7from util import (
1e59de90 8 BoxType,
33c7a0ef
TL
9 Config,
10 Target,
11 ensure_inside_container,
12 ensure_outside_container,
13 get_orch_hosts,
33c7a0ef
TL
14 run_cephadm_shell_command,
15 run_dc_shell_command,
1e59de90 16 get_container_engine,
33c7a0ef
TL
17 run_shell_command,
18)
20effc67 19
1e59de90 20DEVICES_FILE="./devices.json"
20effc67
TL
21
def remove_loop_img() -> None:
    """Delete the configured loopback image file if it is present."""
    img_path = Config.get('loop_img')
    if os.path.exists(img_path):
        os.remove(img_path)
def create_loopback_devices(osds: int) -> Dict[int, Dict[str, str]]:
    """Tear down any previously created OSD devices, then create `osds`
    fresh loopback-backed block devices and persist the mapping to
    DEVICES_FILE.

    Returns a mapping of osd index -> {'img_name': ..., 'device': ...}.
    """
    assert osds
    cleanup_osds()
    devices: Dict[int, Dict[str, str]] = {}
    for index in range(osds):
        name = f'osd{index}'
        devices[index] = dict(img_name=name, device=create_loopback_device(name))
    # Record the mapping so later runs (deploy/destroy) can find the devices.
    with open(DEVICES_FILE, 'w') as fh:
        fh.write(json.dumps(devices))
    return devices
39
def create_loopback_device(img_name, size_gb=5):
    """Create a sparse image file and attach it to a free loop device.

    Args:
        img_name: file name of the backing image, created under the
            configured 'loop_img_dir'.
        size_gb: sparse size of the image in gigabytes (default 5).

    Returns:
        Path of the loop device (e.g. '/dev/loopN') backed by the image.
    """
    loop_img_dir = Config.get('loop_img_dir')
    run_shell_command(f'mkdir -p {loop_img_dir}')
    loop_img = os.path.join(loop_img_dir, img_name)
    run_shell_command(f'rm -f {loop_img}')
    # seek-only dd creates a sparse file: no data blocks actually written.
    run_shell_command(f'dd if=/dev/zero of={loop_img} bs=1 count=0 seek={size_gb}G')
    loop_dev = run_shell_command('sudo losetup -f')
    if not os.path.exists(loop_dev):
        # Inside a container the device node reported by `losetup -f` may not
        # exist yet; create it by hand (major number 7 == loop devices).
        dev_minor = re.match(r'\/dev\/[^\d]+(\d+)', loop_dev).groups()[0]
        run_shell_command(f'sudo mknod -m777 {loop_dev} b 7 {dev_minor}')
        run_shell_command(f'sudo chown {os.getuid()}:{os.getgid()} {loop_dev}')
    if os.path.ismount(loop_dev):
        # BUG FIX: the original called os.umount(), which does not exist in
        # the Python standard library and raised AttributeError whenever this
        # branch was taken. Unmount via the shell instead.
        run_shell_command(f'sudo umount {loop_dev}')
    run_shell_command(f'sudo losetup {loop_dev} {loop_img}')
    run_shell_command(f'sudo chown {os.getuid()}:{os.getgid()} {loop_dev}')
    return loop_dev
20effc67 56
33c7a0ef 57
20effc67
TL
def get_lvm_osd_data(data: str) -> Dict[str, str]:
    """Parse `ceph-volume lvm list <data>` output into a key/value dict.

    Multi-word labels become underscore-joined keys, e.g.
    "block device /dev/x" -> {'block_device': '/dev/x'}.
    """
    listing = run_cephadm_shell_command(f'ceph-volume lvm list {data}')
    parsed: Dict[str, str] = {}
    for raw_line in listing.split('\n'):
        stripped = raw_line.strip()
        if not stripped:
            continue
        tokens = stripped.split()
        # Skip section banners and the "[block]" device header lines.
        if tokens[0].startswith(('===', '[block]')):
            continue
        parsed['_'.join(tokens[:-1])] = tokens[-1]
    return parsed
72
1e59de90
TL
def load_osd_devices():
    """Read back the OSD device mapping written to DEVICES_FILE.

    Returns an empty dict when no devices have been created yet.
    """
    if not os.path.exists(DEVICES_FILE):
        return {}
    with open(DEVICES_FILE) as fh:
        return json.loads(fh.read())
79
33c7a0ef 80
@ensure_inside_container
def deploy_osd(data: str, hostname: str) -> bool:
    """Add a raw OSD on `hostname` backed by the block device at `data`.

    Returns True when the orchestrator reports the OSD was created.
    """
    result = run_cephadm_shell_command(
        f'ceph orch daemon add osd {hostname}:{data} raw'
    )
    return 'Created osd(s)' in result
85
20effc67 86
1e59de90
TL
def cleanup_osds() -> None:
    """Detach every recorded loopback device, delete its backing image,
    and finally remove the image directory."""
    img_dir = Config.get('loop_img_dir')
    for entry in load_osd_devices().values():
        dev = entry['device']
        if 'loop' not in dev:
            continue
        backing = os.path.join(img_dir, entry['img_name'])
        # Detach may fail if the device was never attached; tolerate that.
        run_shell_command(f'sudo losetup -d {dev}', expect_error=True)
        if os.path.exists(backing):
            os.remove(backing)
    run_shell_command(f'rm -rf {img_dir}')
98
99
def deploy_osds(count: int) -> None:
    """Deploy one OSD per device recorded in DEVICES_FILE, rotating
    through the orchestrator hosts and retrying until each succeeds.

    NOTE(review): `count` is unused -- deployment is driven entirely by the
    devices file; confirm whether the parameter should limit the loop.
    """
    osd_devs = load_osd_devices()
    hosts = get_orch_hosts()
    host_index = 0
    seed = get_container_engine().get_seed()
    v = '-v' if Config.get('verbose') else ''
    for osd in osd_devs.values():
        deployed = False
        while not deployed:
            print(hosts)
            hostname = hosts[host_index]['hostname']
            # Run `box.py osd deploy` inside the seed container; success is
            # determined by scraping the command output below.
            deployed = run_dc_shell_command(
                f'/cephadm/box/box.py {v} osd deploy --data {osd["device"]} --hostname {hostname}',
                seed
            )
            deployed = 'created osd' in deployed.lower() or 'already created?' in deployed.lower()
            # NOTE(review): this sleeps even after a successful deploy --
            # presumably to let the orchestrator settle; confirm intent.
            print('Waiting 5 seconds to re-run deploy osd...')
            time.sleep(5)
            host_index = (host_index + 1) % len(hosts)
33c7a0ef
TL
119
120
class Osd(Target):
    """CLI target wiring the osd actions (deploy / create_loop / destroy)
    to the module-level helpers."""

    _help = """
    Deploy osds and create needed block devices with loopback devices:
    Actions:
    - deploy: Deploy an osd given a block device
    - create_loop: Create needed loopback devices and block devices in logical volumes
    for a number of osds.
    - destroy: Remove all osds and the underlying loopback devices.
    """
    actions = ['deploy', 'create_loop', 'destroy']

    def set_args(self):
        """Register the osd target's command-line arguments."""
        self.parser.add_argument('action', choices=Osd.actions)
        self.parser.add_argument('--data', type=str, help='path to a block device')
        self.parser.add_argument('--hostname', type=str, help='host to deploy osd')
        self.parser.add_argument('--osds', type=int, default=0, help='number of osds')

    def deploy(self):
        """Deploy a single OSD on --data, or every recorded device when
        --data is absent."""
        device = Config.get('data')
        host = Config.get('hostname')
        if not host:
            # No --hostname given: target the machine we are running on.
            host = run_shell_command('hostname')
        if device:
            deploy_osd(device, host)
        else:
            deploy_osds(Config.get('osds'))

    @ensure_outside_container
    def create_loop(self):
        """Create --osds loopback devices for later OSD deployment."""
        num_osds = Config.get('osds')
        create_loopback_devices(int(num_osds))
        print('Successfully created loopback devices')

    @ensure_outside_container
    def destroy(self):
        """Remove all loopback devices and their backing images."""
        cleanup_osds()