# Fill up the cluster. This dd may or may not fail, as it depends on
# how soon the cluster recognises its own fullness
- self.mount_a.write_n_mb("large_file_a", self.fill_mb / 2)
+ self.mount_a.write_n_mb("large_file_a", self.fill_mb // 2)
try:
- self.mount_a.write_n_mb("large_file_b", self.fill_mb / 2)
+ self.mount_a.write_n_mb("large_file_b", self.fill_mb // 2)
except CommandFailedError:
log.info("Writing file B failed (full status happened already)")
assert self.is_full()
# Attempting to write more data should give me ENOSPC
with self.assertRaises(CommandFailedError) as ar:
- self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb / 2)
+ self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb // 2)
self.assertEqual(ar.exception.exitstatus, 1) # dd returns 1 on "No space"
# Wait for the MDS to see the latest OSD map so that it will reliably
# Configs for this test should bring this setting down in order to
# run reasonably quickly
if osd_mon_report_interval > 10:
- log.warn("This test may run rather slowly unless you decrease"
+ log.warning("This test may run rather slowly unless you decrease "
"osd_mon_report_interval (5 is a good setting)!")
self.mount_a.run_python(template.format(
Test per-pool fullness, which indicates quota limits exceeded
"""
pool_capacity = 1024 * 1024 * 32 # arbitrary low-ish limit
- fill_mb = pool_capacity / (1024 * 1024) # type: ignore
+ fill_mb = pool_capacity // (1024 * 1024) # type: ignore
# We are only testing quota handling on the data pool, not the metadata
# pool.
max_avail = self.fs.get_pool_df(self._data_pool_name())['max_avail']
full_ratio = float(self.fs.get_config("mon_osd_full_ratio", service_type="mon"))
TestClusterFull.pool_capacity = int(max_avail * full_ratio)
- TestClusterFull.fill_mb = (self.pool_capacity / (1024 * 1024))
+ TestClusterFull.fill_mb = (self.pool_capacity // (1024 * 1024))
def is_full(self):
return self.fs.is_full()