# Source: ceph/src/python-common/ceph/deployment/translate.py
# (extracted from git.proxmox.com ceph.git,
#  commit 2d373732c04f1fb44881901aa0e8aaae842c025e)
import logging

from typing import Optional, List

from ceph.deployment.drive_selection.selector import DriveSelection
# Module-level logger, named after this module per stdlib convention.
logger = logging.getLogger(__name__)
# TODO refactor this to a DriveSelection method
class to_ceph_volume(object):
    """Translate a :class:`DriveSelection` into ``ceph-volume`` command strings.

    ``run()`` emits one ``raw prepare`` command per data device for
    ``method == 'raw'`` specs, or a single ``lvm batch`` command otherwise,
    then appends the option flags shared by every command.

    NOTE(review): this block was reconstructed from a line-mangled extraction.
    The dropped lines (method headers, ``if db_devices:``/``if wal_devices:``
    guards, ``cmds.append(cmd)``, ``if self.preview:``, ``--yes``/``--filestore``
    flags and the final ``return cmds``) were restored from the surrounding
    fragments and line-number gaps — verify against the upstream file.
    """

    def __init__(self,
                 selection,  # type: DriveSelection
                 osd_id_claims=None,  # type: Optional[List[str]]
                 preview=False  # type: bool
                 ):
        # The DriveSelection carrying the resolved device lists.
        self.selection = selection
        # The originating spec (method, objectstore, sizes, feature flags).
        self.spec = selection.spec
        # When True, ask ceph-volume for a JSON report instead of acting.
        self.preview = preview
        # Pre-existing OSD ids to re-use (emitted via --osd-ids).
        self.osd_id_claims = osd_id_claims

    def run(self):
        # type: () -> List[str]
        """ Generate ceph-volume commands based on the DriveGroup filters """
        data_devices = [x.path for x in self.selection.data_devices()]
        db_devices = [x.path for x in self.selection.db_devices()]
        wal_devices = [x.path for x in self.selection.wal_devices()]
        journal_devices = [x.path for x in self.selection.journal_devices()]

        # Nothing selected -> nothing to do.
        if not data_devices:
            return []

        cmds = []  # type: List[str]
        if self.spec.method == 'raw':
            assert self.spec.objectstore == 'bluestore'
            # ceph-volume raw prepare only support 1:1 ratio of data to db/wal devices
            if data_devices and db_devices:
                if len(data_devices) != len(db_devices):
                    raise ValueError('Number of data devices must match number of '
                                     'db devices for raw mode osds')
            if data_devices and wal_devices:
                if len(data_devices) != len(wal_devices):
                    raise ValueError('Number of data devices must match number of '
                                     'wal devices for raw mode osds')
            # for raw prepare each data device needs its own prepare command
            dev_counter = 0
            while dev_counter < len(data_devices):
                cmd = "raw prepare --bluestore"
                cmd += " --data {}".format(data_devices[dev_counter])
                if db_devices:
                    cmd += " --block.db {}".format(db_devices[dev_counter])
                if wal_devices:
                    cmd += " --block.wal {}".format(wal_devices[dev_counter])
                cmds.append(cmd)
                dev_counter += 1

        elif self.spec.objectstore == 'filestore':
            # for lvm batch we can just do all devices in one command
            cmd = "lvm batch --no-auto"
            cmd += " {}".format(" ".join(data_devices))

            if self.spec.journal_size:
                cmd += " --journal-size {}".format(self.spec.journal_size)

            if journal_devices:
                cmd += " --journal-devices {}".format(
                    ' '.join(journal_devices))
            cmd += " --filestore"
            cmds.append(cmd)

        elif self.spec.objectstore == 'bluestore':
            # for lvm batch we can just do all devices in one command
            cmd = "lvm batch --no-auto {}".format(" ".join(data_devices))
            if db_devices:
                cmd += " --db-devices {}".format(" ".join(db_devices))
            if wal_devices:
                cmd += " --wal-devices {}".format(" ".join(wal_devices))
            if self.spec.block_wal_size:
                cmd += " --block-wal-size {}".format(self.spec.block_wal_size)
            if self.spec.block_db_size:
                cmd += " --block-db-size {}".format(self.spec.block_db_size)
            cmds.append(cmd)

        # Append the option flags shared by every generated command.
        for i in range(len(cmds)):
            if self.spec.encrypted:
                cmds[i] += " --dmcrypt"

            if self.spec.osds_per_device:
                cmds[i] += " --osds-per-device {}".format(self.spec.osds_per_device)

            if self.spec.data_allocate_fraction:
                cmds[i] += " --data-allocate-fraction {}".format(
                    self.spec.data_allocate_fraction)

            if self.osd_id_claims:
                cmds[i] += " --osd-ids {}".format(" ".join(self.osd_id_claims))

            if self.spec.method != 'raw':
                # 'raw prepare' is already non-interactive and not managed
                # by the lvm systemd units, so these only apply to lvm batch.
                cmds[i] += " --yes"
                cmds[i] += " --no-systemd"

            if self.spec.crush_device_class:
                cmds[i] += " --crush-device-class {}".format(self.spec.crush_device_class)

            if self.preview:
                # Dry-run: report what would be created, in JSON.
                cmds[i] += " --report"
                cmds[i] += " --format json"

        return cmds