]>
git.proxmox.com Git - ceph.git/blob - ceph/qa/standalone/special/ceph_objectstore_tool.py
3 from __future__
import print_function
4 from subprocess
import call
6 from subprocess
import check_output
8 def check_output (* popenargs
, ** kwargs
):
10 # backported from python 2.7 stdlib
11 process
= subprocess
. Popen (
12 stdout
= subprocess
. PIPE
, * popenargs
, ** kwargs
)
13 output
, unused_err
= process
. communicate ()
14 retcode
= process
. poll ()
16 cmd
= kwargs
. get ( "args" )
19 error
= subprocess
. CalledProcessError ( retcode
, cmd
)
37 from subprocess
import DEVNULL
39 DEVNULL
= open ( os
. devnull
, "wb" )
41 logging
. basicConfig ( format
= ' %(levelname)s : %(message)s ' , level
= logging
. WARNING
)
44 if sys
. version_info
[ 0 ] >= 3 :
46 return s
. decode ( 'utf-8' )
48 def check_output (* args
, ** kwargs
):
49 return decode ( subprocess
. check_output (* args
, ** kwargs
))
56 def wait_for_health ():
57 print ( "Wait for health_ok..." , end
= "" )
59 while call ( " {path} /ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null" . format ( path
= CEPH_BIN
), shell
= True ) == 0 :
62 raise Exception ( "Time exceeded to go to health" )
67 def get_pool_id ( name
, nullfd
):
68 cmd
= " {path} /ceph osd pool stats {pool} " . format ( pool
= name
, path
= CEPH_BIN
). split ()
69 # pool {pool} id # .... grab the 4 field
70 return check_output ( cmd
, stderr
= nullfd
). split ()[ 3 ]
73 # return a list of unique PGS given an osd subdirectory
74 def get_osd_pgs ( SUBDIR
, ID
):
77 endhead
= re
. compile ( " {id} .*_head$" . format ( id = ID
))
78 DIR
= os
. path
. join ( SUBDIR
, "current" )
79 PGS
+= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and ( ID
is None or endhead
. match ( f
))]
80 PGS
= [ re
. sub ( "_head" , "" , p
) for p
in PGS
if "_head" in p
]
84 # return a sorted list of unique PGs given a directory
86 OSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
89 SUBDIR
= os
. path
. join ( DIR
, d
)
90 PGS
+= get_osd_pgs ( SUBDIR
, ID
)
91 return sorted ( set ( PGS
))
94 # return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified
95 def get_objs ( ALLPGS
, prefix
, DIR
, ID
):
96 OSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
99 DIRL2
= os
. path
. join ( DIR
, d
)
100 SUBDIR
= os
. path
. join ( DIRL2
, "current" )
103 if not os
. path
. isdir ( os
. path
. join ( SUBDIR
, PGDIR
)):
105 FINALDIR
= os
. path
. join ( SUBDIR
, PGDIR
)
106 # See if there are any objects there
107 if any ( f
for f
in [ val
for _
, _
, fl
in os
. walk ( FINALDIR
) for val
in fl
] if f
. startswith ( prefix
)):
109 return sorted ( set ( PGS
))
112 # return a sorted list of OSDS which have data from a given PG
113 def get_osds ( PG
, DIR
):
114 ALLOSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
117 DIRL2
= os
. path
. join ( DIR
, d
)
118 SUBDIR
= os
. path
. join ( DIRL2
, "current" )
120 if not os
. path
. isdir ( os
. path
. join ( SUBDIR
, PGDIR
)):
126 def get_lines ( filename
):
127 tmpfd
= open ( filename
, "r" )
131 line
= tmpfd
. readline (). rstrip ( ' \n ' )
139 def cat_file ( level
, filename
):
140 if level
< logging
. getLogger (). getEffectiveLevel ():
142 print ( "File: " + filename
)
143 with
open ( filename
, "r" ) as f
:
145 line
= f
. readline (). rstrip ( ' \n ' )
152 def vstart ( new
, opt
= "" ):
153 print ( "vstarting...." , end
= "" )
154 NEW
= new
and "-n" or "-N"
155 call ( "MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 MGR_PYTHON_PATH= {path} /src/pybind/mgr {path} /src/vstart.sh --filestore --short -l {new} -d {opt} > /dev/null 2>&1" . format ( new
= NEW
, opt
= opt
, path
= CEPH_ROOT
), shell
= True )
159 def test_failure ( cmd
, errmsg
, tty
= False ):
162 ttyfd
= open ( "/dev/tty" , "rwb" )
163 except Exception as e
:
165 logging
. info ( "SKIP " + cmd
)
167 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
168 tmpfd
= open ( TMPFILE
, "wb" )
172 ret
= call ( cmd
, shell
= True , stdin
= ttyfd
, stdout
= ttyfd
, stderr
= tmpfd
)
175 ret
= call ( cmd
, shell
= True , stderr
= tmpfd
)
179 logging
. error ( "Should have failed, but got exit 0" )
181 lines
= get_lines ( TMPFILE
)
182 matched
= [ l
for l
in lines
if errmsg
in l
]
184 logging
. info ( "Correctly failed with message \" " + matched
[ 0 ] + " \" " )
187 logging
. error ( "Command: " + cmd
)
188 logging
. error ( "Bad messages to stderr \" " + str ( lines
) + " \" " )
189 logging
. error ( "Expected \" " + errmsg
+ " \" " )
196 return "ns {num} " . format ( num
= num
)
199 def verify ( DATADIR
, POOL
, NAME_PREFIX
, db
):
200 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
202 for rawnsfile
in [ f
for f
in os
. listdir ( DATADIR
) if f
. split ( '-' )[ 1 ]. find ( NAME_PREFIX
) == 0 ]:
203 nsfile
= rawnsfile
. split ( "__" )[ 0 ]
204 clone
= rawnsfile
. split ( "__" )[ 1 ]
205 nspace
= nsfile
. split ( "-" )[ 0 ]
206 file = nsfile
. split ( "-" )[ 1 ]
210 path
= os
. path
. join ( DATADIR
, rawnsfile
)
215 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' get {file} {out} " . format ( pool
= POOL
, file = file , out
= TMPFILE
, nspace
= nspace
, path
= CEPH_BIN
)
217 call ( cmd
, shell
= True , stdout
= DEVNULL
, stderr
= DEVNULL
)
218 cmd
= "diff -q {src} {result} " . format ( src
= path
, result
= TMPFILE
)
220 ret
= call ( cmd
, shell
= True )
222 logging
. error ( " {file} data not imported properly" . format ( file = file ))
228 for key
, val
in db
[ nspace
][ file ][ "xattr" ]. items ():
229 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getxattr {name} {key} " . format ( pool
= POOL
, name
= file , key
= key
, nspace
= nspace
, path
= CEPH_BIN
)
231 getval
= check_output ( cmd
, shell
= True , stderr
= DEVNULL
)
232 logging
. debug ( "getxattr {key} {val} " . format ( key
= key
, val
= getval
))
234 logging
. error ( "getxattr of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= key
, get
= getval
, orig
= val
))
237 hdr
= db
[ nspace
][ file ]. get ( "omapheader" , "" )
238 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getomapheader {name} {file} " . format ( pool
= POOL
, name
= file , nspace
= nspace
, file = TMPFILE
, path
= CEPH_BIN
)
240 ret
= call ( cmd
, shell
= True , stderr
= DEVNULL
)
242 logging
. error ( "rados getomapheader returned {ret} " . format ( ret
= ret
))
245 getlines
= get_lines ( TMPFILE
)
246 assert ( len ( getlines
) == 0 or len ( getlines
) == 1 )
247 if len ( getlines
) == 0 :
251 logging
. debug ( "header: {hdr} " . format ( hdr
= gethdr
))
253 logging
. error ( "getomapheader returned wrong val: {get} instead of {orig} " . format ( get
= gethdr
, orig
= hdr
))
255 for key
, val
in db
[ nspace
][ file ][ "omap" ]. items ():
256 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getomapval {name} {key} {file} " . format ( pool
= POOL
, name
= file , key
= key
, nspace
= nspace
, file = TMPFILE
, path
= CEPH_BIN
)
258 ret
= call ( cmd
, shell
= True , stderr
= DEVNULL
)
260 logging
. error ( "getomapval returned {ret} " . format ( ret
= ret
))
263 getlines
= get_lines ( TMPFILE
)
264 if len ( getlines
) != 1 :
265 logging
. error ( "Bad data from getomapval {lines} " . format ( lines
= getlines
))
269 logging
. debug ( "getomapval {key} {val} " . format ( key
= key
, val
= getval
))
271 logging
. error ( "getomapval returned wrong val: {get} instead of {orig} " . format ( get
= getval
, orig
= val
))
280 def check_journal ( jsondict
):
282 if 'header' not in jsondict
:
283 logging
. error ( "Key 'header' not in dump-journal" )
285 elif 'max_size' not in jsondict
[ 'header' ]:
286 logging
. error ( "Key 'max_size' not in dump-journal header" )
289 print ( " \t Journal max_size = {size} " . format ( size
= jsondict
[ 'header' ][ 'max_size' ]))
290 if 'entries' not in jsondict
:
291 logging
. error ( "Key 'entries' not in dump-journal output" )
293 elif len ( jsondict
[ 'entries' ]) == 0 :
294 logging
. info ( "No entries in journal found" )
296 errors
+= check_journal_entries ( jsondict
[ 'entries' ])
300 def check_journal_entries ( entries
):
302 for enum
in range ( len ( entries
)):
303 if 'offset' not in entries
[ enum
]:
304 logging
. error ( "No 'offset' key in entry {e} " . format ( e
= enum
))
306 if 'seq' not in entries
[ enum
]:
307 logging
. error ( "No 'seq' key in entry {e} " . format ( e
= enum
))
309 if 'transactions' not in entries
[ enum
]:
310 logging
. error ( "No 'transactions' key in entry {e} " . format ( e
= enum
))
312 elif len ( entries
[ enum
][ 'transactions' ]) == 0 :
313 logging
. error ( "No transactions found in entry {e} " . format ( e
= enum
))
316 errors
+= check_entry_transactions ( entries
[ enum
], enum
)
320 def check_entry_transactions ( entry
, enum
):
322 for tnum
in range ( len ( entry
[ 'transactions' ])):
323 if 'trans_num' not in entry
[ 'transactions' ][ tnum
]:
324 logging
. error ( "Key 'trans_num' missing from entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
326 elif entry
[ 'transactions' ][ tnum
][ 'trans_num' ] != tnum
:
327 ft
= entry
[ 'transactions' ][ tnum
][ 'trans_num' ]
328 logging
. error ( "Bad trans_num ( {ft} ) entry {e} trans {t} " . format ( ft
= ft
, e
= enum
, t
= tnum
))
330 if 'ops' not in entry
[ 'transactions' ][ tnum
]:
331 logging
. error ( "Key 'ops' missing from entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
334 errors
+= check_transaction_ops ( entry
[ 'transactions' ][ tnum
][ 'ops' ], enum
, tnum
)
338 def check_transaction_ops ( ops
, enum
, tnum
):
340 logging
. warning ( "No ops found in entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
342 for onum
in range ( len ( ops
)):
343 if 'op_num' not in ops
[ onum
]:
344 logging
. error ( "Key 'op_num' missing from entry {e} trans {t} op {o} " . format ( e
= enum
, t
= tnum
, o
= onum
))
346 elif ops
[ onum
][ 'op_num' ] != onum
:
347 fo
= ops
[ onum
][ 'op_num' ]
348 logging
. error ( "Bad op_num ( {fo} ) from entry {e} trans {t} op {o} " . format ( fo
= fo
, e
= enum
, t
= tnum
, o
= onum
))
350 if 'op_name' not in ops
[ onum
]:
351 logging
. error ( "Key 'op_name' missing from entry {e} trans {t} op {o} " . format ( e
= enum
, t
= tnum
, o
= onum
))
356 def test_dump_journal ( CFSD_PREFIX
, osds
):
359 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= pid
)
362 # Test --op dump-journal by loading json
363 cmd
= ( CFSD_PREFIX
+ "--op dump-journal --format json" ). format ( osd
= osd
)
365 tmpfd
= open ( TMPFILE
, "wb" )
366 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
368 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
372 tmpfd
= open ( TMPFILE
, "r" )
373 jsondict
= json
. load ( tmpfd
)
377 journal_errors
= check_journal ( jsondict
)
378 if journal_errors
is not 0 :
379 logging
. error ( jsondict
)
380 ERRORS
+= journal_errors
384 CEPH_BUILD_DIR
= os
. environ
. get ( 'CEPH_BUILD_DIR' )
385 CEPH_BIN
= os
. environ
. get ( 'CEPH_BIN' )
386 CEPH_ROOT
= os
. environ
. get ( 'CEPH_ROOT' )
388 if not CEPH_BUILD_DIR
:
389 CEPH_BUILD_DIR
= os
. getcwd ()
390 os
. putenv ( 'CEPH_BUILD_DIR' , CEPH_BUILD_DIR
)
391 CEPH_BIN
= os
. path
. join ( CEPH_BUILD_DIR
, 'bin' )
392 os
. putenv ( 'CEPH_BIN' , CEPH_BIN
)
393 CEPH_ROOT
= os
. path
. dirname ( CEPH_BUILD_DIR
)
394 os
. putenv ( 'CEPH_ROOT' , CEPH_ROOT
)
395 CEPH_LIB
= os
. path
. join ( CEPH_BUILD_DIR
, 'lib' )
396 os
. putenv ( 'CEPH_LIB' , CEPH_LIB
)
401 pass # ok if this is already there
402 CEPH_DIR
= os
. path
. join ( CEPH_BUILD_DIR
, os
. path
. join ( "td" , "cot_dir" ))
403 CEPH_CONF
= os
. path
. join ( CEPH_DIR
, 'ceph.conf' )
406 call ( " {path} /init-ceph -c {conf} stop > /dev/null 2>&1" . format ( conf
= CEPH_CONF
, path
= CEPH_BIN
), shell
= True )
409 def check_data ( DATADIR
, TMPFILE
, OSDDIR
, SPLIT_NAME
):
412 for rawnsfile
in [ f
for f
in os
. listdir ( DATADIR
) if f
. split ( '-' )[ 1 ]. find ( SPLIT_NAME
) == 0 ]:
413 nsfile
= rawnsfile
. split ( "__" )[ 0 ]
414 clone
= rawnsfile
. split ( "__" )[ 1 ]
415 nspace
= nsfile
. split ( "-" )[ 0 ]
416 file = nsfile
. split ( "-" )[ 1 ] + "__" + clone
420 path
= os
. path
. join ( DATADIR
, rawnsfile
)
421 tmpfd
= open ( TMPFILE
, "wb" )
422 cmd
= "find {dir} -name ' {file} _*_ {nspace} _*'" . format ( dir = OSDDIR
, file = file , nspace
= nspace
)
424 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
426 logging
. critical ( "INTERNAL ERROR" )
429 obj_locs
= get_lines ( TMPFILE
)
430 if len ( obj_locs
) == 0 :
431 logging
. error ( "Can't find imported object {name} " . format ( name
= file ))
433 for obj_loc
in obj_locs
:
434 # For btrfs skip snap_* dirs
435 if re
. search ( "/snap_[0-9]*/" , obj_loc
) is not None :
438 cmd
= "diff -q {src} {obj_loc} " . format ( src
= path
, obj_loc
= obj_loc
)
440 ret
= call ( cmd
, shell
= True )
442 logging
. error ( " {file} data not imported properly into {obj} " . format ( file = file , obj
= obj_loc
))
444 return ERRORS
, repcount
447 def set_osd_weight ( CFSD_PREFIX
, osd_ids
, osd_path
, weight
):
448 # change the weight of osd.0 to math.pi in the newest osdmap of given osd
449 osdmap_file
= tempfile
. NamedTemporaryFile ( delete
= True )
450 cmd
= ( CFSD_PREFIX
+ "--op get-osdmap --file {osdmap_file} " ). format ( osd
= osd_path
,
451 osdmap_file
= osdmap_file
. name
)
452 output
= check_output ( cmd
, shell
= True )
453 epoch
= int ( re
. findall ( '#(\d+)' , output
)[ 0 ])
455 new_crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
456 old_crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
457 ret
= call ( " {path} /osdmaptool --export-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
458 crush_file
= old_crush_file
. name
, path
= CEPH_BIN
),
464 for osd_id
in osd_ids
:
465 cmd
= " {path} /crushtool -i {crush_file} --reweight-item osd. {osd} {weight} -o {new_crush_file} " . format ( osd
= osd_id
,
466 crush_file
= old_crush_file
. name
,
468 new_crush_file
= new_crush_file
. name
, path
= CEPH_BIN
)
469 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
471 old_crush_file
, new_crush_file
= new_crush_file
, old_crush_file
473 # change them back, since we don't need to preapre for another round
474 old_crush_file
, new_crush_file
= new_crush_file
, old_crush_file
475 old_crush_file
. close ()
477 ret
= call ( " {path} /osdmaptool --import-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
478 crush_file
= new_crush_file
. name
, path
= CEPH_BIN
),
484 # Minimum test of --dry-run by using it, but not checking anything
485 cmd
= CFSD_PREFIX
+ "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
486 cmd
= cmd
. format ( osd
= osd_path
, osdmap_file
= osdmap_file
. name
, epoch
= epoch
)
487 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
490 # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool
491 # to use use a different epoch than the one in osdmap
492 cmd
= CFSD_PREFIX
+ "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
493 cmd
= cmd
. format ( osd
= osd_path
, osdmap_file
= osdmap_file
. name
, epoch
= epoch
)
494 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
498 def get_osd_weights ( CFSD_PREFIX
, osd_ids
, osd_path
):
499 osdmap_file
= tempfile
. NamedTemporaryFile ( delete
= True )
500 cmd
= ( CFSD_PREFIX
+ "--op get-osdmap --file {osdmap_file} " ). format ( osd
= osd_path
,
501 osdmap_file
= osdmap_file
. name
)
502 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
505 # we have to read the weights from the crush map, even we can query the weights using
506 # osdmaptool, but please keep in mind, they are different:
507 # item weights in crush map versus weight associated with each osd in osdmap
508 crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
509 ret
= call ( " {path} /osdmaptool --export-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
510 crush_file
= crush_file
. name
, path
= CEPH_BIN
),
514 output
= check_output ( " {path} /crushtool --tree -i {crush_file} | tail -n {num_osd} " . format ( crush_file
= crush_file
. name
,
515 num_osd
= len ( osd_ids
), path
= CEPH_BIN
),
519 for line
in output
. strip (). split ( ' \n ' ):
521 linev
= re
. split ( '\s+' , line
)
524 print ( 'linev %s ' % linev
)
525 weights
. append ( float ( linev
[ 2 ]))
530 def test_get_set_osdmap ( CFSD_PREFIX
, osd_ids
, osd_paths
):
531 print ( "Testing get-osdmap and set-osdmap" )
534 weight
= 1 / math
. e
# just some magic number in [0, 1]
536 for osd_path
in osd_paths
:
537 if set_osd_weight ( CFSD_PREFIX
, osd_ids
, osd_path
, weight
):
538 changed
. append ( osd_path
)
540 logging
. warning ( "Failed to change the weights: {0} " . format ( osd_path
))
541 # i am pissed off if none of the store gets changed
545 for osd_path
in changed
:
546 weights
= get_osd_weights ( CFSD_PREFIX
, osd_ids
, osd_path
)
550 if any ( abs ( w
- weight
) > 1e-5 for w
in weights
):
551 logging
. warning ( "Weight is not changed: {0} != {1} " . format ( weights
, weight
))
555 def test_get_set_inc_osdmap ( CFSD_PREFIX
, osd_path
):
556 # incrementals are not used unless we need to build an MOSDMap to update
557 # OSD's peers, so an obvious way to test it is simply overwrite an epoch
558 # with a different copy, and read it back to see if it matches.
560 file_e2
= tempfile
. NamedTemporaryFile ( delete
= True )
561 cmd
= ( CFSD_PREFIX
+ "--op get-inc-osdmap --file {file} " ). format ( osd
= osd_path
,
563 output
= check_output ( cmd
, shell
= True )
564 epoch
= int ( re
. findall ( '#(\d+)' , output
)[ 0 ])
565 # backup e1 incremental before overwriting it
567 file_e1_backup
= tempfile
. NamedTemporaryFile ( delete
= True )
568 cmd
= CFSD_PREFIX
+ "--op get-inc-osdmap --epoch {epoch} --file {file} "
569 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
571 # overwrite e1 with e2
572 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --force --epoch {epoch} --file {file} "
573 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e2
. name
), shell
= True )
575 # Use dry-run to set back to e1 which shouldn't happen
576 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file} "
577 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
580 file_e1_read
= tempfile
. NamedTemporaryFile ( delete
= True )
581 cmd
= CFSD_PREFIX
+ "--op get-inc-osdmap --epoch {epoch} --file {file} "
582 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_read
. name
), shell
= True )
586 if not filecmp
. cmp ( file_e2
. name
, file_e1_read
. name
, shallow
= False ):
587 logging
. error ( "{{get,set}}-inc-osdmap mismatch {0} != {1} " . format ( file_e2
. name
, file_e1_read
. name
))
590 # revert the change with file_e1_backup
591 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --epoch {epoch} --file {file} "
592 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
594 logging
. error ( "Failed to revert the changed inc-osdmap" )
600 def test_removeall ( CFSD_PREFIX
, db
, OBJREPPGS
, REP_POOL
, CEPH_BIN
, OSDDIR
, REP_NAME
, NUM_CLONED_REP_OBJECTS
):
602 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
603 nullfd
= open ( os
. devnull
, "w" )
605 print ( "Test removeall" )
607 test_force_remove
= 0
608 for nspace
in db
. keys ():
609 for basename
in db
[ nspace
]. keys ():
610 JSON
= db
[ nspace
][ basename
][ 'json' ]
612 OSDS
= get_osds ( pg
, OSDDIR
)
614 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
615 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
616 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
620 if int ( basename
. split ( REP_NAME
)[ 1 ]) <= int ( NUM_CLONED_REP_OBJECTS
):
621 cmd
= ( CFSD_PREFIX
+ "' {json} ' remove" ). format ( osd
= osd
, json
= JSON
)
622 errors
+= test_failure ( cmd
, "Snapshots are present, use removeall to delete everything" )
623 if not test_force_remove
:
625 cmd
= ( CFSD_PREFIX
+ " ' {json} ' set-attr snapset /dev/null" ). format ( osd
= osd
, json
= JSON
)
627 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
629 logging
. error ( "Test set-up to corrupt snapset failed for {json} " . format ( json
= JSON
))
631 # Do the removeall since this test failed to set-up
633 test_force_remove
= 1
635 cmd
= ( CFSD_PREFIX
+ " ' {json} ' --force remove" ). format ( osd
= osd
, json
= JSON
)
637 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
639 logging
. error ( "forced remove with corrupt snapset failed for {json} " . format ( json
= JSON
))
643 cmd
= ( CFSD_PREFIX
+ " --force --dry-run ' {json} ' remove" ). format ( osd
= osd
, json
= JSON
)
645 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
647 logging
. error ( "remove with --force failed for {json} " . format ( json
= JSON
))
650 cmd
= ( CFSD_PREFIX
+ " --dry-run ' {json} ' removeall" ). format ( osd
= osd
, json
= JSON
)
652 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
654 logging
. error ( "removeall failed for {json} " . format ( json
= JSON
))
657 cmd
= ( CFSD_PREFIX
+ " ' {json} ' removeall" ). format ( osd
= osd
, json
= JSON
)
659 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
661 logging
. error ( "removeall failed for {json} " . format ( json
= JSON
))
664 tmpfd
= open ( TMPFILE
, "w" )
665 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} --namespace {ns} {name} " ). format ( osd
= osd
, pg
= pg
, ns
= nspace
, name
= basename
)
667 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
669 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
672 lines
= get_lines ( TMPFILE
)
674 logging
. error ( "Removeall didn't remove all objects {ns} / {name} : {lines} " . format ( ns
= nspace
, name
= basename
, lines
= lines
))
678 cmd
= " {path} /rados -p {pool} rmsnap snap1" . format ( pool
= REP_POOL
, path
= CEPH_BIN
)
680 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
682 logging
. error ( "rados rmsnap failed" )
690 if sys
. version_info
[ 0 ] < 3 :
691 sys
. stdout
= stdout
= os
. fdopen ( sys
. stdout
. fileno (), 'wb' , 0 )
693 stdout
= sys
. stdout
. buffer
694 if len ( argv
) > 1 and argv
[ 1 ] == "debug" :
699 call ( "rm -fr {dir} ; mkdir -p {dir} " . format ( dir = CEPH_DIR
), shell
= True )
701 os
. environ
[ "CEPH_DIR" ] = CEPH_DIR
703 REP_POOL
= "rep_pool"
704 REP_NAME
= "REPobject"
707 if len ( argv
) > 0 and argv
[ 0 ] == 'large' :
709 NUM_REP_OBJECTS
= 200
710 NUM_CLONED_REP_OBJECTS
= 50
713 # Larger data sets for first object per namespace
714 DATALINECOUNT
= 50000
715 # Number of objects to do xattr/omap testing on
720 NUM_CLONED_REP_OBJECTS
= 2
723 # Larger data sets for first object per namespace
725 # Number of objects to do xattr/omap testing on
729 TESTDIR
= "/tmp/test. {pid} " . format ( pid
= pid
)
730 DATADIR
= "/tmp/data. {pid} " . format ( pid
= pid
)
731 CFSD_PREFIX
= CEPH_BIN
+ "/ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR
+ "/ {osd} "
732 PROFNAME
= "testecprofile"
734 os
. environ
[ 'CEPH_CONF' ] = CEPH_CONF
738 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} replicated" . format ( pool
= REP_POOL
, pg
= PG_COUNT
, path
= CEPH_BIN
)
740 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
742 REPID
= get_pool_id ( REP_POOL
, nullfd
)
744 print ( "Created Replicated pool # {repid} " . format ( repid
= REPID
))
746 cmd
= " {path} /ceph osd erasure-code-profile set {prof} crush-failure-domain=osd" . format ( prof
= PROFNAME
, path
= CEPH_BIN
)
748 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
749 cmd
= " {path} /ceph osd erasure-code-profile get {prof} " . format ( prof
= PROFNAME
, path
= CEPH_BIN
)
751 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
752 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} erasure {prof} " . format ( pool
= EC_POOL
, prof
= PROFNAME
, pg
= PG_COUNT
, path
= CEPH_BIN
)
754 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
755 ECID
= get_pool_id ( EC_POOL
, nullfd
)
757 print ( "Created Erasure coded pool # {ecid} " . format ( ecid
= ECID
))
759 print ( "Creating {objs} objects in replicated pool" . format ( objs
=( NUM_REP_OBJECTS
* NUM_NSPACES
)))
760 cmd
= "mkdir -p {datadir} " . format ( datadir
= DATADIR
)
762 call ( cmd
, shell
= True )
766 objects
= range ( 1 , NUM_REP_OBJECTS
+ 1 )
767 nspaces
= range ( NUM_NSPACES
)
769 nspace
= get_nspace ( n
)
774 NAME
= REP_NAME
+ " {num} " . format ( num
= i
)
775 LNAME
= nspace
+ "-" + NAME
776 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
779 cmd
= "rm -f " + DDNAME
781 call ( cmd
, shell
= True )
784 dataline
= range ( DATALINECOUNT
)
787 fd
= open ( DDNAME
, "w" )
788 data
= "This is the replicated data for " + LNAME
+ " \n "
793 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= REP_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
795 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
797 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
800 db
[ nspace
][ NAME
] = {}
802 if i
< ATTR_OBJS
+ 1 :
806 db
[ nspace
][ NAME
][ "xattr" ] = {}
810 mykey
= "key {i} - {k} " . format ( i
= i
, k
= k
)
811 myval
= "val {i} - {k} " . format ( i
= i
, k
= k
)
812 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setxattr {name} {key} {val} " . format ( pool
= REP_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
814 ret
= call ( cmd
, shell
= True )
816 logging
. error ( "setxattr failed with {ret} " . format ( ret
= ret
))
818 db
[ nspace
][ NAME
][ "xattr" ][ mykey
] = myval
820 # Create omap header in all objects but REPobject1
821 if i
< ATTR_OBJS
+ 1 and i
!= 1 :
822 myhdr
= "hdr {i} " . format ( i
= i
)
823 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setomapheader {name} {hdr} " . format ( pool
= REP_POOL
, name
= NAME
, hdr
= myhdr
, nspace
= nspace
, path
= CEPH_BIN
)
825 ret
= call ( cmd
, shell
= True )
827 logging
. critical ( "setomapheader failed with {ret} " . format ( ret
= ret
))
829 db
[ nspace
][ NAME
][ "omapheader" ] = myhdr
831 db
[ nspace
][ NAME
][ "omap" ] = {}
835 mykey
= "okey {i} - {k} " . format ( i
= i
, k
= k
)
836 myval
= "oval {i} - {k} " . format ( i
= i
, k
= k
)
837 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setomapval {name} {key} {val} " . format ( pool
= REP_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
839 ret
= call ( cmd
, shell
= True )
841 logging
. critical ( "setomapval failed with {ret} " . format ( ret
= ret
))
842 db
[ nspace
][ NAME
][ "omap" ][ mykey
] = myval
845 cmd
= " {path} /rados -p {pool} mksnap snap1" . format ( pool
= REP_POOL
, path
= CEPH_BIN
)
847 call ( cmd
, shell
= True )
849 objects
= range ( 1 , NUM_CLONED_REP_OBJECTS
+ 1 )
850 nspaces
= range ( NUM_NSPACES
)
852 nspace
= get_nspace ( n
)
855 NAME
= REP_NAME
+ " {num} " . format ( num
= i
)
856 LNAME
= nspace
+ "-" + NAME
857 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
859 CLONENAME
= DDNAME
+ "__1"
862 cmd
= "mv -f " + DDNAME
+ " " + CLONENAME
864 call ( cmd
, shell
= True )
867 dataline
= range ( DATALINECOUNT
)
870 fd
= open ( DDNAME
, "w" )
871 data
= "This is the replicated data after a snapshot for " + LNAME
+ " \n "
876 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= REP_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
878 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
880 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
883 print ( "Creating {objs} objects in erasure coded pool" . format ( objs
=( NUM_EC_OBJECTS
* NUM_NSPACES
)))
885 objects
= range ( 1 , NUM_EC_OBJECTS
+ 1 )
886 nspaces
= range ( NUM_NSPACES
)
888 nspace
= get_nspace ( n
)
891 NAME
= EC_NAME
+ " {num} " . format ( num
= i
)
892 LNAME
= nspace
+ "-" + NAME
893 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
896 cmd
= "rm -f " + DDNAME
898 call ( cmd
, shell
= True )
901 dataline
= range ( DATALINECOUNT
)
904 fd
= open ( DDNAME
, "w" )
905 data
= "This is the erasure coded data for " + LNAME
+ " \n "
910 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= EC_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
912 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
914 logging
. critical ( "Erasure coded pool creation failed with {ret} " . format ( ret
= ret
))
917 db
[ nspace
][ NAME
] = {}
919 db
[ nspace
][ NAME
][ "xattr" ] = {}
920 if i
< ATTR_OBJS
+ 1 :
927 mykey
= "key {i} - {k} " . format ( i
= i
, k
= k
)
928 myval
= "val {i} - {k} " . format ( i
= i
, k
= k
)
929 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setxattr {name} {key} {val} " . format ( pool
= EC_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
931 ret
= call ( cmd
, shell
= True )
933 logging
. error ( "setxattr failed with {ret} " . format ( ret
= ret
))
935 db
[ nspace
][ NAME
][ "xattr" ][ mykey
] = myval
937 # Omap isn't supported in EC pools
938 db
[ nspace
][ NAME
][ "omap" ] = {}
945 logging
. critical ( "Unable to set up test" )
948 ALLREPPGS
= get_pgs ( OSDDIR
, REPID
)
949 logging
. debug ( ALLREPPGS
)
950 ALLECPGS
= get_pgs ( OSDDIR
, ECID
)
951 logging
. debug ( ALLECPGS
)
953 OBJREPPGS
= get_objs ( ALLREPPGS
, REP_NAME
, OSDDIR
, REPID
)
954 logging
. debug ( OBJREPPGS
)
955 OBJECPGS
= get_objs ( ALLECPGS
, EC_NAME
, OSDDIR
, ECID
)
956 logging
. debug ( OBJECPGS
)
960 osds
= get_osds ( ONEPG
, OSDDIR
)
962 logging
. debug ( ONEOSD
)
964 print ( "Test invalid parameters" )
965 # On export can't use stdout to a terminal
966 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} " ). format ( osd
= ONEOSD
, pg
= ONEPG
)
967 ERRORS
+= test_failure ( cmd
, "stdout is a tty and no --file filename specified" , tty
= True )
969 # On export can't use stdout to a terminal
970 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file -" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
971 ERRORS
+= test_failure ( cmd
, "stdout is a tty and no --file filename specified" , tty
= True )
973 # Prep a valid ec export file for import failure tests
974 ONEECPG
= ALLECPGS
[ 0 ]
975 osds
= get_osds ( ONEECPG
, OSDDIR
)
977 OTHERFILE
= "/tmp/foo. {pid} " . format ( pid
= pid
)
978 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= ONEECOSD
, pg
= ONEECPG
, file = OTHERFILE
)
980 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
984 # Prep a valid export file for import failure tests
985 OTHERFILE
= "/tmp/foo. {pid} " . format ( pid
= pid
)
986 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= ONEOSD
, pg
= ONEPG
, file = OTHERFILE
)
988 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
990 # On import can't specify a different pgid than the file
991 TMPPG
= " {pool} .80" . format ( pool
= REPID
)
992 cmd
= ( CFSD_PREFIX
+ "--op import --pgid 12.dd --file {file} " ). format ( osd
= ONEOSD
, pg
= TMPPG
, file = OTHERFILE
)
993 ERRORS
+= test_failure ( cmd
, "specified pgid 12.dd does not match actual pgid" )
996 cmd
= ( CFSD_PREFIX
+ "--op import --file {FOO} " ). format ( osd
= ONEOSD
, FOO
= OTHERFILE
)
997 ERRORS
+= test_failure ( cmd
, "file: {FOO} : No such file or directory" . format ( FOO
= OTHERFILE
))
999 cmd
= " {path} /ceph-objectstore-tool --no-mon-config --data-path BAD_DATA_PATH --op list" . format ( osd
= ONEOSD
, path
= CEPH_BIN
)
1000 ERRORS
+= test_failure ( cmd
, "data-path: BAD_DATA_PATH: No such file or directory" )
1002 cmd
= ( CFSD_PREFIX
+ "--journal-path BAD_JOURNAL_PATH --op list" ). format ( osd
= ONEOSD
)
1003 ERRORS
+= test_failure ( cmd
, "journal-path: BAD_JOURNAL_PATH: No such file or directory" )
1005 cmd
= ( CFSD_PREFIX
+ "--journal-path /bin --op list" ). format ( osd
= ONEOSD
)
1006 ERRORS
+= test_failure ( cmd
, "journal-path: /bin: (21) Is a directory" )
1008 # On import can't use stdin from a terminal
1009 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} " ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1010 ERRORS
+= test_failure ( cmd
, "stdin is a tty and no --file filename specified" , tty
= True )
1012 # On import can't use stdin from a terminal
1013 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file -" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1014 ERRORS
+= test_failure ( cmd
, "stdin is a tty and no --file filename specified" , tty
= True )
1016 # Specify a bad --type
1017 os
. mkdir ( OSDDIR
+ "/fakeosd" )
1018 cmd
= ( " {path} /ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR
+ "/ {osd} --type foobar --op list --pgid {pg} " ). format ( osd
= "fakeosd" , pg
= ONEPG
, path
= CEPH_BIN
)
1019 ERRORS
+= test_failure ( cmd
, "Unable to create store of type foobar" )
1021 # Don't specify a data-path
1022 cmd
= " {path} /ceph-objectstore-tool --no-mon-config --type memstore --op list --pgid {pg} " . format ( dir = OSDDIR
, osd
= ONEOSD
, pg
= ONEPG
, path
= CEPH_BIN
)
1023 ERRORS
+= test_failure ( cmd
, "Must provide --data-path" )
1025 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid 2.0" ). format ( osd
= ONEOSD
)
1026 ERRORS
+= test_failure ( cmd
, "Please use export-remove or you must use --force option" )
1028 cmd
= ( CFSD_PREFIX
+ "--force --op remove" ). format ( osd
= ONEOSD
)
1029 ERRORS
+= test_failure ( cmd
, "Must provide pgid" )
1031 # Don't secify a --op nor object command
1032 cmd
= CFSD_PREFIX
. format ( osd
= ONEOSD
)
1033 ERRORS
+= test_failure ( cmd
, "Must provide --op or object command..." )
1035 # Specify a bad --op command
1036 cmd
= ( CFSD_PREFIX
+ "--op oops" ). format ( osd
= ONEOSD
)
1037 ERRORS
+= test_failure ( cmd
, "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, dump-export, trim-pg-log)" )
1039 # Provide just the object param not a command
1040 cmd
= ( CFSD_PREFIX
+ "object" ). format ( osd
= ONEOSD
)
1041 ERRORS
+= test_failure ( cmd
, "Invalid syntax, missing command" )
1043 # Provide an object name that doesn't exist
1044 cmd
= ( CFSD_PREFIX
+ "NON_OBJECT get-bytes" ). format ( osd
= ONEOSD
)
1045 ERRORS
+= test_failure ( cmd
, "No object id 'NON_OBJECT' found" )
1047 # Provide an invalid object command
1048 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} '' notacommand" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1049 ERRORS
+= test_failure ( cmd
, "Unknown object command 'notacommand'" )
1051 cmd
= ( CFSD_PREFIX
+ "foo list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1052 ERRORS
+= test_failure ( cmd
, "No object id 'foo' found or invalid JSON specified" )
1054 cmd
= ( CFSD_PREFIX
+ "'{{ \" oid \" : \" obj4 \" , \" key \" : \"\" , \" snapid \" :-1, \" hash \" :2826278768, \" max \" :0, \" pool \" :1, \" namespace \" : \"\" }}' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1055 ERRORS
+= test_failure ( cmd
, "Without --pgid the object '{ \" oid \" : \" obj4 \" , \" key \" : \"\" , \" snapid \" :-1, \" hash \" :2826278768, \" max \" :0, \" pool \" :1, \" namespace \" : \"\" }' must be a JSON array" )
1057 cmd
= ( CFSD_PREFIX
+ "'[]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1058 ERRORS
+= test_failure ( cmd
, "Object '[]' must be a JSON array with 2 elements" )
1060 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.0 \" ]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1061 ERRORS
+= test_failure ( cmd
, "Object '[ \" 1.0 \" ]' must be a JSON array with 2 elements" )
1063 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.0 \" , 5, 8, 9]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1064 ERRORS
+= test_failure ( cmd
, "Object '[ \" 1.0 \" , 5, 8, 9]' must be a JSON array with 2 elements" )
1066 cmd
= ( CFSD_PREFIX
+ "'[1, 2]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1067 ERRORS
+= test_failure ( cmd
, "Object '[1, 2]' must be a JSON array with the first element a string" )
1069 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.3 \" ,{{ \" snapid \" : \" not an int \" }}]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1070 ERRORS
+= test_failure ( cmd
, "Decode object JSON error: value type is 2 not 4" )
1072 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= pid
)
1073 ALLPGS
= OBJREPPGS
+ OBJECPGS
1074 OSDS
= get_osds ( ALLPGS
[ 0 ], OSDDIR
)
1077 print ( "Test all --op dump-journal" )
1078 ALLOSDS
= [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]
1079 ERRORS
+= test_dump_journal ( CFSD_PREFIX
, ALLOSDS
)
1081 # Test --op list and generate json for all objects
1082 print ( "Test --op list variants" )
1084 # retrieve all objects from all PGs
1085 tmpfd
= open ( TMPFILE
, "wb" )
1086 cmd
= ( CFSD_PREFIX
+ "--op list --format json" ). format ( osd
= osd
)
1088 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1090 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1093 lines
= get_lines ( TMPFILE
)
1094 JSONOBJ
= sorted ( set ( lines
))
1095 ( pgid
, coll
, jsondict
) = json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1097 # retrieve all objects in a given PG
1098 tmpfd
= open ( OTHERFILE
, "ab" )
1099 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} --format json" ). format ( osd
= osd
, pg
= pgid
)
1101 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1103 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1106 lines
= get_lines ( OTHERFILE
)
1107 JSONOBJ
= sorted ( set ( lines
))
1108 ( other_pgid
, other_coll
, other_jsondict
) = json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1110 if pgid
!= other_pgid
or jsondict
!= other_jsondict
or coll
!= other_coll
:
1111 logging
. error ( "the first line of --op list is different "
1112 "from the first line of --op list --pgid {pg} " . format ( pg
= pgid
))
1115 # retrieve all objects with a given name in a given PG
1116 tmpfd
= open ( OTHERFILE
, "wb" )
1117 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} {object} --format json" ). format ( osd
= osd
, pg
= pgid
, object = jsondict
[ 'oid' ])
1119 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1121 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1124 lines
= get_lines ( OTHERFILE
)
1125 JSONOBJ
= sorted ( set ( lines
))
1126 ( other_pgid
, other_coll
, other_jsondict
) in json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1128 if pgid
!= other_pgid
or jsondict
!= other_jsondict
or coll
!= other_coll
:
1129 logging
. error ( "the first line of --op list is different "
1130 "from the first line of --op list --pgid {pg} {object} " . format ( pg
= pgid
, object = jsondict
[ 'oid' ]))
1133 print ( "Test --op list by generating json for all objects using default format" )
1135 OSDS
= get_osds ( pg
, OSDDIR
)
1137 tmpfd
= open ( TMPFILE
, "ab" )
1138 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} " ). format ( osd
= osd
, pg
= pg
)
1140 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1142 logging
. error ( "Bad exit status {ret} from --op list request" . format ( ret
= ret
))
1146 lines
= get_lines ( TMPFILE
)
1147 JSONOBJ
= sorted ( set ( lines
))
1148 for JSON
in JSONOBJ
:
1149 ( pgid
, jsondict
) = json
. loads ( JSON
)
1150 # Skip clones for now
1151 if jsondict
[ 'snapid' ] != - 2 :
1153 db
[ jsondict
[ 'namespace' ]][ jsondict
[ 'oid' ]][ 'json' ] = json
. dumps (( pgid
, jsondict
))
1154 # print db[jsondict['namespace']][jsondict['oid']]['json']
1155 if jsondict
[ 'oid' ]. find ( EC_NAME
) == 0 and 'shard_id' not in jsondict
:
1156 logging
. error ( "Malformed JSON {json} " . format ( json
= JSON
))
1160 print ( "Test get-bytes and set-bytes" )
1161 for nspace
in db
. keys ():
1162 for basename
in db
[ nspace
]. keys ():
1163 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1164 JSON
= db
[ nspace
][ basename
][ 'json' ]
1165 GETNAME
= "/tmp/getbytes. {pid} " . format ( pid
= pid
)
1166 TESTNAME
= "/tmp/testbytes. {pid} " . format ( pid
= pid
)
1167 SETNAME
= "/tmp/setbytes. {pid} " . format ( pid
= pid
)
1168 BADNAME
= "/tmp/badbytes. {pid} " . format ( pid
= pid
)
1169 for pg
in OBJREPPGS
:
1170 OSDS
= get_osds ( pg
, OSDDIR
)
1172 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1173 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1174 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1181 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-bytes {fname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, fname
= GETNAME
)
1183 ret
= call ( cmd
, shell
= True )
1185 logging
. error ( "Bad exit status {ret} " . format ( ret
= ret
))
1188 cmd
= "diff -q {file} {getfile} " . format ( file = file , getfile
= GETNAME
)
1189 ret
= call ( cmd
, shell
= True )
1191 logging
. error ( "Data from get-bytes differ" )
1192 logging
. debug ( "Got:" )
1193 cat_file ( logging
. DEBUG
, GETNAME
)
1194 logging
. debug ( "Expected:" )
1195 cat_file ( logging
. DEBUG
, file )
1197 fd
= open ( SETNAME
, "w" )
1198 data
= "put-bytes going into {file} \n " . format ( file = file )
1201 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' set-bytes {sname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, sname
= SETNAME
)
1203 ret
= call ( cmd
, shell
= True )
1205 logging
. error ( "Bad exit status {ret} from set-bytes" . format ( ret
= ret
))
1207 fd
= open ( TESTNAME
, "wb" )
1208 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-bytes -" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1210 ret
= call ( cmd
, shell
= True , stdout
= fd
)
1213 logging
. error ( "Bad exit status {ret} from get-bytes" . format ( ret
= ret
))
1215 cmd
= "diff -q {setfile} {testfile} " . format ( setfile
= SETNAME
, testfile
= TESTNAME
)
1217 ret
= call ( cmd
, shell
= True )
1219 logging
. error ( "Data after set-bytes differ" )
1220 logging
. debug ( "Got:" )
1221 cat_file ( logging
. DEBUG
, TESTNAME
)
1222 logging
. debug ( "Expected:" )
1223 cat_file ( logging
. DEBUG
, SETNAME
)
1226 # Use set-bytes with --dry-run and make sure contents haven't changed
1227 fd
= open ( BADNAME
, "w" )
1228 data
= "Bad data for --dry-run in {file} \n " . format ( file = file )
1231 cmd
= ( CFSD_PREFIX
+ "--dry-run --pgid {pg} ' {json} ' set-bytes {sname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, sname
= BADNAME
)
1233 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1235 logging
. error ( "Bad exit status {ret} from set-bytes --dry-run" . format ( ret
= ret
))
1237 fd
= open ( TESTNAME
, "wb" )
1238 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-bytes -" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1240 ret
= call ( cmd
, shell
= True , stdout
= fd
)
1243 logging
. error ( "Bad exit status {ret} from get-bytes" . format ( ret
= ret
))
1245 cmd
= "diff -q {setfile} {testfile} " . format ( setfile
= SETNAME
, testfile
= TESTNAME
)
1247 ret
= call ( cmd
, shell
= True )
1249 logging
. error ( "Data after set-bytes --dry-run changed!" )
1250 logging
. debug ( "Got:" )
1251 cat_file ( logging
. DEBUG
, TESTNAME
)
1252 logging
. debug ( "Expected:" )
1253 cat_file ( logging
. DEBUG
, SETNAME
)
1256 fd
= open ( file , "rb" )
1257 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' set-bytes" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1259 ret
= call ( cmd
, shell
= True , stdin
= fd
)
1261 logging
. error ( "Bad exit status {ret} from set-bytes to restore object" . format ( ret
= ret
))
1282 # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
1283 print ( "Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap" )
1284 for nspace
in db
. keys ():
1285 for basename
in db
[ nspace
]. keys ():
1286 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1287 JSON
= db
[ nspace
][ basename
][ 'json' ]
1288 for pg
in OBJREPPGS
:
1289 OSDS
= get_osds ( pg
, OSDDIR
)
1291 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1292 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1293 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1296 for key
, val
in db
[ nspace
][ basename
][ "xattr" ]. items ():
1298 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-attr {key} " ). format ( osd
= osd
, json
= JSON
, key
= attrkey
)
1300 getval
= check_output ( cmd
, shell
= True )
1302 logging
. error ( "get-attr of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= attrkey
, get
= getval
, orig
= val
))
1305 # set-attr to bogus value "foobar"
1306 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1308 ret
= call ( cmd
, shell
= True )
1310 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1313 # Test set-attr with dry-run
1314 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1316 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1318 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1321 # Check the set-attr
1322 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1324 getval
= check_output ( cmd
, shell
= True )
1326 logging
. error ( "Bad exit status {ret} from get-attr" . format ( ret
= ret
))
1329 if getval
!= "foobar" :
1330 logging
. error ( "Check of set-attr failed because we got {val} " . format ( val
= getval
))
1334 cmd
= ( CFSD_PREFIX
+ "' {json} ' rm-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1336 ret
= call ( cmd
, shell
= True )
1338 logging
. error ( "Bad exit status {ret} from rm-attr" . format ( ret
= ret
))
1341 # Check rm-attr with dry-run
1342 cmd
= ( CFSD_PREFIX
+ "--dry-run ' {json} ' rm-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1344 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1346 logging
. error ( "Bad exit status {ret} from rm-attr" . format ( ret
= ret
))
1349 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1351 ret
= call ( cmd
, shell
= True , stderr
= nullfd
, stdout
= nullfd
)
1353 logging
. error ( "For rm-attr expect get-attr to fail, but it succeeded" )
1356 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
, val
= val
)
1358 ret
= call ( cmd
, shell
= True )
1360 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1364 hdr
= db
[ nspace
][ basename
]. get ( "omapheader" , "" )
1365 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omaphdr" ). format ( osd
= osd
, json
= JSON
)
1367 gethdr
= check_output ( cmd
, shell
= True )
1369 logging
. error ( "get-omaphdr was wrong: {get} instead of {orig} " . format ( get
= gethdr
, orig
= hdr
))
1372 # set-omaphdr to bogus value "foobar"
1373 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ "' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1375 ret
= call ( cmd
, shell
= True )
1377 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1380 # Check the set-omaphdr
1381 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1383 gethdr
= check_output ( cmd
, shell
= True )
1385 logging
. error ( "Bad exit status {ret} from get-omaphdr" . format ( ret
= ret
))
1388 if gethdr
!= "foobar" :
1389 logging
. error ( "Check of set-omaphdr failed because we got {val} " . format ( val
= getval
))
1392 # Test dry-run with set-omaphdr
1393 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run ' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1395 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1397 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1401 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ "' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
, val
= hdr
)
1403 ret
= call ( cmd
, shell
= True )
1405 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1409 for omapkey
, val
in db
[ nspace
][ basename
][ "omap" ]. items ():
1410 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-omap {key} " ). format ( osd
= osd
, json
= JSON
, key
= omapkey
)
1412 getval
= check_output ( cmd
, shell
= True )
1414 logging
. error ( "get-omap of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= omapkey
, get
= getval
, orig
= val
))
1417 # set-omap to bogus value "foobar"
1418 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1420 ret
= call ( cmd
, shell
= True )
1422 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1425 # Check set-omap with dry-run
1426 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1428 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1430 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1433 # Check the set-omap
1434 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1436 getval
= check_output ( cmd
, shell
= True )
1438 logging
. error ( "Bad exit status {ret} from get-omap" . format ( ret
= ret
))
1441 if getval
!= "foobar" :
1442 logging
. error ( "Check of set-omap failed because we got {val} " . format ( val
= getval
))
1446 cmd
= ( CFSD_PREFIX
+ "' {json} ' rm-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1448 ret
= call ( cmd
, shell
= True )
1450 logging
. error ( "Bad exit status {ret} from rm-omap" . format ( ret
= ret
))
1452 # Check rm-omap with dry-run
1453 cmd
= ( CFSD_PREFIX
+ "--dry-run ' {json} ' rm-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1455 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1457 logging
. error ( "Bad exit status {ret} from rm-omap" . format ( ret
= ret
))
1459 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1461 ret
= call ( cmd
, shell
= True , stderr
= nullfd
, stdout
= nullfd
)
1463 logging
. error ( "For rm-omap expect get-omap to fail, but it succeeded" )
1466 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
, val
= val
)
1468 ret
= call ( cmd
, shell
= True )
1470 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1476 for nspace
in db
. keys ():
1477 for basename
in db
[ nspace
]. keys ():
1478 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1479 JSON
= db
[ nspace
][ basename
][ 'json' ]
1480 jsondict
= json
. loads ( JSON
)
1481 for pg
in OBJREPPGS
:
1482 OSDS
= get_osds ( pg
, OSDDIR
)
1484 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1485 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1486 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1489 if int ( basename
. split ( REP_NAME
)[ 1 ]) > int ( NUM_CLONED_REP_OBJECTS
):
1491 logging
. debug ( "REPobject " + JSON
)
1492 cmd
= ( CFSD_PREFIX
+ " ' {json} ' dump | grep ' \" snap \" : 1,' > /dev/null" ). format ( osd
= osd
, json
= JSON
)
1494 ret
= call ( cmd
, shell
= True )
1496 logging
. error ( "Invalid dump for {json} " . format ( json
= JSON
))
1498 if 'shard_id' in jsondict
[ 1 ]:
1499 logging
. debug ( "ECobject " + JSON
)
1501 OSDS
= get_osds ( pg
, OSDDIR
)
1502 jsondict
= json
. loads ( JSON
)
1504 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1505 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1506 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1509 if int ( basename
. split ( EC_NAME
)[ 1 ]) > int ( NUM_EC_OBJECTS
):
1511 # Fix shard_id since we only have one json instance for each object
1512 jsondict
[ 1 ][ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1513 cmd
= ( CFSD_PREFIX
+ " ' {json} ' dump | grep ' \" hinfo \" : [{{]' > /dev/null" ). format ( osd
= osd
, json
= json
. dumps (( pg
, jsondict
[ 1 ])))
1515 ret
= call ( cmd
, shell
= True )
1517 logging
. error ( "Invalid dump for {json} " . format ( json
= JSON
))
1519 print ( "Test list-attrs get-attr" )
1520 ATTRFILE
= r
"/tmp/attrs. {pid} " . format ( pid
= pid
)
1521 VALFILE
= r
"/tmp/val. {pid} " . format ( pid
= pid
)
1522 for nspace
in db
. keys ():
1523 for basename
in db
[ nspace
]. keys ():
1524 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
)
1525 JSON
= db
[ nspace
][ basename
][ 'json' ]
1526 jsondict
= json
. loads ( JSON
)
1528 if 'shard_id' in jsondict
[ 1 ]:
1529 logging
. debug ( "ECobject " + JSON
)
1532 OSDS
= get_osds ( pg
, OSDDIR
)
1533 # Fix shard_id since we only have one json instance for each object
1534 jsondict
[ 1 ][ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1535 JSON
= json
. dumps (( pg
, jsondict
[ 1 ]))
1537 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-attr hinfo_key" ). format ( osd
= osd
, json
= JSON
)
1538 logging
. debug ( "TRY: " + cmd
)
1540 out
= check_output ( cmd
, shell
= True , stderr
= subprocess
. STDOUT
)
1541 logging
. debug ( "FOUND: {json} in {osd} has value ' {val} '" . format ( osd
= osd
, json
= JSON
, val
= out
))
1543 except subprocess
. CalledProcessError
as e
:
1544 if "No such file or directory" not in e
. output
and "No data available" not in e
. output
:
1546 # Assuming k=2 m=1 for the default ec pool
1548 logging
. error ( " {json} hinfo_key found {found} times instead of 3" . format ( json
= JSON
, found
= found
))
1552 # Make sure rep obj with rep pg or ec obj with ec pg
1553 if ( 'shard_id' in jsondict
[ 1 ]) != ( pg
. find ( 's' ) > 0 ):
1555 if 'shard_id' in jsondict
[ 1 ]:
1556 # Fix shard_id since we only have one json instance for each object
1557 jsondict
[ 1 ][ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1558 JSON
= json
. dumps (( pg
, jsondict
[ 1 ]))
1559 OSDS
= get_osds ( pg
, OSDDIR
)
1561 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1562 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1563 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1566 afd
= open ( ATTRFILE
, "wb" )
1567 cmd
= ( CFSD_PREFIX
+ " ' {json} ' list-attrs" ). format ( osd
= osd
, json
= JSON
)
1569 ret
= call ( cmd
, shell
= True , stdout
= afd
)
1572 logging
. error ( "list-attrs failed with {ret} " . format ( ret
= ret
))
1575 keys
= get_lines ( ATTRFILE
)
1576 values
= dict ( db
[ nspace
][ basename
][ "xattr" ])
1578 if key
== "_" or key
== "snapset" or key
== "hinfo_key" :
1580 key
= key
. strip ( "_" )
1581 if key
not in values
:
1582 logging
. error ( "Unexpected key {key} present" . format ( key
= key
))
1585 exp
= values
. pop ( key
)
1586 vfd
= open ( VALFILE
, "wb" )
1587 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-attr {key} " ). format ( osd
= osd
, json
= JSON
, key
= "_" + key
)
1589 ret
= call ( cmd
, shell
= True , stdout
= vfd
)
1592 logging
. error ( "get-attr failed with {ret} " . format ( ret
= ret
))
1595 lines
= get_lines ( VALFILE
)
1598 logging
. error ( "For key {key} got value {got} instead of {expected} " . format ( key
= key
, got
= val
, expected
= exp
))
1600 if len ( values
) != 0 :
1601 logging
. error ( "Not all keys found, remaining keys:" )
1604 print ( "Test --op meta-list" )
1605 tmpfd
= open ( TMPFILE
, "wb" )
1606 cmd
= ( CFSD_PREFIX
+ "--op meta-list" ). format ( osd
= ONEOSD
)
1608 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1610 logging
. error ( "Bad exit status {ret} from --op meta-list request" . format ( ret
= ret
))
1613 print ( "Test get-bytes on meta" )
1615 lines
= get_lines ( TMPFILE
)
1616 JSONOBJ
= sorted ( set ( lines
))
1617 for JSON
in JSONOBJ
:
1618 ( pgid
, jsondict
) = json
. loads ( JSON
)
1620 logging
. error ( "pgid incorrect for --op meta-list {pgid} " . format ( pgid
= pgid
))
1622 if jsondict
[ 'namespace' ] != "" :
1623 logging
. error ( "namespace non null --op meta-list {ns} " . format ( ns
= jsondict
[ 'namespace' ]))
1630 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-bytes {fname} " ). format ( osd
= ONEOSD
, json
= JSON
, fname
= GETNAME
)
1632 ret
= call ( cmd
, shell
= True )
1634 logging
. error ( "Bad exit status {ret} " . format ( ret
= ret
))
1646 print ( "Test pg info" )
1647 for pg
in ALLREPPGS
+ ALLECPGS
:
1648 for osd
in get_osds ( pg
, OSDDIR
):
1649 cmd
= ( CFSD_PREFIX
+ "--op info --pgid {pg} | grep ' \" pgid \" : \" {pg} \" '" ). format ( osd
= osd
, pg
= pg
)
1651 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1653 logging
. error ( "Getting info failed for pg {pg} from {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1656 print ( "Test pg logging" )
1657 if len ( ALLREPPGS
+ ALLECPGS
) == len ( OBJREPPGS
+ OBJECPGS
):
1658 logging
. warning ( "All PGs have objects, so no log without modify entries" )
1659 for pg
in ALLREPPGS
+ ALLECPGS
:
1660 for osd
in get_osds ( pg
, OSDDIR
):
1661 tmpfd
= open ( TMPFILE
, "wb" )
1662 cmd
= ( CFSD_PREFIX
+ "--op log --pgid {pg} " ). format ( osd
= osd
, pg
= pg
)
1664 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1666 logging
. error ( "Getting log failed for pg {pg} from {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1668 HASOBJ
= pg
in OBJREPPGS
+ OBJECPGS
1670 for line
in get_lines ( TMPFILE
):
1671 if line
. find ( "modify" ) != - 1 :
1674 if HASOBJ
!= MODOBJ
:
1675 logging
. error ( "Bad log for pg {pg} from {osd} " . format ( pg
= pg
, osd
= osd
))
1676 MSG
= ( HASOBJ
and [ "" ] or [ "NOT " ])[ 0 ]
1677 print ( "Log should {msg} have a modify entry" . format ( msg
= MSG
))
1685 print ( "Test list-pgs" )
1686 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1688 CHECK_PGS
= get_osd_pgs ( os
. path
. join ( OSDDIR
, osd
), None )
1689 CHECK_PGS
= sorted ( CHECK_PGS
)
1691 cmd
= ( CFSD_PREFIX
+ "--op list-pgs" ). format ( osd
= osd
)
1693 TEST_PGS
= check_output ( cmd
, shell
= True ). split ( " \n " )
1694 TEST_PGS
= sorted ( TEST_PGS
)[ 1 :] # Skip extra blank line
1696 if TEST_PGS
!= CHECK_PGS
:
1697 logging
. error ( "list-pgs got wrong result for osd. {osd} " . format ( osd
= osd
))
1698 logging
. error ( "Expected {pgs} " . format ( pgs
= CHECK_PGS
))
1699 logging
. error ( "Got {pgs} " . format ( pgs
= TEST_PGS
))
1703 print ( "Test pg export --dry-run" )
1705 osd
= get_osds ( pg
, OSDDIR
)[ 0 ]
1706 fname
= "/tmp/fname. {pid} " . format ( pid
= pid
)
1707 cmd
= ( CFSD_PREFIX
+ "--dry-run --op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1709 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1711 logging
. error ( "Exporting --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1713 elif os
. path
. exists ( fname
):
1714 logging
. error ( "Exporting --dry-run created file" )
1717 cmd
= ( CFSD_PREFIX
+ "--dry-run --op export --pgid {pg} > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1719 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1721 logging
. error ( "Exporting --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1724 outdata
= get_lines ( fname
)
1725 if len ( outdata
) > 0 :
1726 logging
. error ( "Exporting --dry-run to stdout not empty" )
1727 logging
. error ( "Data: " + outdata
)
1731 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1732 os
. mkdir ( os
. path
. join ( TESTDIR
, osd
))
1733 print ( "Test pg export" )
1734 for pg
in ALLREPPGS
+ ALLECPGS
:
1735 for osd
in get_osds ( pg
, OSDDIR
):
1736 mydir
= os
. path
. join ( TESTDIR
, osd
)
1737 fname
= os
. path
. join ( mydir
, pg
)
1738 if pg
== ALLREPPGS
[ 0 ]:
1739 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1740 elif pg
== ALLREPPGS
[ 1 ]:
1741 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file - > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1743 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1745 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1747 logging
. error ( "Exporting failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1750 ERRORS
+= EXP_ERRORS
1752 print ( "Test clear-data-digest" )
1753 for nspace
in db
. keys ():
1754 for basename
in db
[ nspace
]. keys ():
1755 JSON
= db
[ nspace
][ basename
][ 'json' ]
1756 cmd
= ( CFSD_PREFIX
+ "' {json} ' clear-data-digest" ). format ( osd
= 'osd0' , json
= JSON
)
1758 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1760 logging
. error ( "Clearing data digest failed for {json} " . format ( json
= JSON
))
1763 cmd
= ( CFSD_PREFIX
+ "' {json} ' dump | grep ' \" data_digest \" : \" 0xff'" ). format ( osd
= 'osd0' , json
= JSON
)
1765 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1767 logging
. error ( "Data digest not cleared for {json} " . format ( json
= JSON
))
1773 print ( "Test pg removal" )
1775 for pg
in ALLREPPGS
+ ALLECPGS
:
1776 for osd
in get_osds ( pg
, OSDDIR
):
1777 # This should do nothing
1778 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid {pg} --dry-run" ). format ( pg
= pg
, osd
= osd
)
1780 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1782 logging
. error ( "Removing --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1784 cmd
= ( CFSD_PREFIX
+ "--force --op remove --pgid {pg} " ). format ( pg
= pg
, osd
= osd
)
1786 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1788 logging
. error ( "Removing failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1794 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 :
1795 print ( "Test pg import" )
1796 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1797 dir = os
. path
. join ( TESTDIR
, osd
)
1798 PGS
= [ f
for f
in os
. listdir ( dir ) if os
. path
. isfile ( os
. path
. join ( dir , f
))]
1800 file = os
. path
. join ( dir , pg
)
1801 # Make sure this doesn't crash
1802 cmd
= ( CFSD_PREFIX
+ "--op dump-export --file {file} " ). format ( osd
= osd
, file = file )
1804 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1806 logging
. error ( "Dump-export failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1808 # This should do nothing
1809 cmd
= ( CFSD_PREFIX
+ "--op import --file {file} --dry-run" ). format ( osd
= osd
, file = file )
1811 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1813 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1816 cmd
= ( "cat {file} |" . format ( file = file ) + CFSD_PREFIX
+ "--op import" ). format ( osd
= osd
)
1818 cmd
= ( CFSD_PREFIX
+ "--op import --file - --pgid {pg} < {file} " ). format ( osd
= osd
, file = file , pg
= pg
)
1820 cmd
= ( CFSD_PREFIX
+ "--op import --file {file} " ). format ( osd
= osd
, file = file )
1822 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1824 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1827 logging
. warning ( "SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES" )
1829 ERRORS
+= IMP_ERRORS
1832 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 and IMP_ERRORS
== 0 :
1833 print ( "Verify replicated import data" )
1834 data_errors
, _
= check_data ( DATADIR
, TMPFILE
, OSDDIR
, REP_NAME
)
1835 ERRORS
+= data_errors
1837 logging
. warning ( "SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES" )
1839 print ( "Test all --op dump-journal again" )
1840 ALLOSDS
= [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]
1841 ERRORS
+= test_dump_journal ( CFSD_PREFIX
, ALLOSDS
)
1846 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 and IMP_ERRORS
== 0 :
1847 print ( "Verify erasure coded import data" )
1848 ERRORS
+= verify ( DATADIR
, EC_POOL
, EC_NAME
, db
)
1849 # Check replicated data/xattr/omap using rados
1850 print ( "Verify replicated import data using rados" )
1851 ERRORS
+= verify ( DATADIR
, REP_POOL
, REP_NAME
, db
)
1854 NEWPOOL
= "rados-import-pool"
1855 cmd
= " {path} /ceph osd pool create {pool} 8" . format ( pool
= NEWPOOL
, path
= CEPH_BIN
)
1857 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1859 print ( "Test rados import" )
1861 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1862 dir = os
. path
. join ( TESTDIR
, osd
)
1863 for pg
in [ f
for f
in os
. listdir ( dir ) if os
. path
. isfile ( os
. path
. join ( dir , f
))]:
1864 if pg
. find ( " {id} ." . format ( id = REPID
)) != 0 :
1866 file = os
. path
. join ( dir , pg
)
1869 # This should do nothing
1870 cmd
= " {path} /rados import -p {pool} --dry-run {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1872 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1874 logging
. error ( "Rados import --dry-run failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1876 cmd
= " {path} /rados -p {pool} ls" . format ( pool
= NEWPOOL
, path
= CEPH_BIN
)
1878 data
= check_output ( cmd
, shell
= True )
1880 logging
. error ( "' {data} '" . format ( data
= data
))
1881 logging
. error ( "Found objects after dry-run" )
1883 cmd
= " {path} /rados import -p {pool} {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1885 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1887 logging
. error ( "Rados import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1889 cmd
= " {path} /rados import -p {pool} --no-overwrite {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1891 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1893 logging
. error ( "Rados import --no-overwrite failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1896 ERRORS
+= verify ( DATADIR
, NEWPOOL
, REP_NAME
, db
)
1898 logging
. warning ( "SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES" )
1900 # Clear directories of previous portion
1901 call ( "/bin/rm -rf {dir} " . format ( dir = TESTDIR
), shell
= True )
1902 call ( "/bin/rm -rf {dir} " . format ( dir = DATADIR
), shell
= True )
1906 # Cause SPLIT_POOL to split and test import with object/log filtering
1907 print ( "Testing import all objects after a split" )
1908 SPLIT_POOL
= "split_pool"
1911 SPLIT_NSPACE_COUNT
= 2
1912 SPLIT_NAME
= "split"
1913 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} replicated" . format ( pool
= SPLIT_POOL
, pg
= PG_COUNT
, path
= CEPH_BIN
)
1915 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1916 SPLITID
= get_pool_id ( SPLIT_POOL
, nullfd
)
1917 pool_size
= int ( check_output ( " {path} /ceph osd pool get {pool} size" . format ( pool
= SPLIT_POOL
, path
= CEPH_BIN
), shell
= True , stderr
= nullfd
). split ( " " )[ 1 ])
1922 objects
= range ( 1 , SPLIT_OBJ_COUNT
+ 1 )
1923 nspaces
= range ( SPLIT_NSPACE_COUNT
)
1925 nspace
= get_nspace ( n
)
1928 NAME
= SPLIT_NAME
+ " {num} " . format ( num
= i
)
1929 LNAME
= nspace
+ "-" + NAME
1930 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
1933 cmd
= "rm -f " + DDNAME
1935 call ( cmd
, shell
= True )
1938 dataline
= range ( DATALINECOUNT
)
1941 fd
= open ( DDNAME
, "w" )
1942 data
= "This is the split data for " + LNAME
+ " \n "
1947 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= SPLIT_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
1949 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
1951 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
1957 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1958 os
. mkdir ( os
. path
. join ( TESTDIR
, osd
))
1960 pg
= " {pool} .0" . format ( pool
= SPLITID
)
1963 export_osds
= get_osds ( pg
, OSDDIR
)
1964 for osd
in export_osds
:
1965 mydir
= os
. path
. join ( TESTDIR
, osd
)
1966 fname
= os
. path
. join ( mydir
, pg
)
1967 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1969 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1971 logging
. error ( "Exporting failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1974 ERRORS
+= EXP_ERRORS
1980 cmd
= " {path} /ceph osd pool set {pool} pg_num 2" . format ( pool
= SPLIT_POOL
, path
= CEPH_BIN
)
1982 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1988 # Now 2 PGs, poolid.0 and poolid.1
1989 # make note of pgs before we remove the pgs...
1990 osds
= get_osds ( " {pool} .0" . format ( pool
= SPLITID
), OSDDIR
);
1991 for seed
in range ( 2 ):
1992 pg
= " {pool} . {seed} " . format ( pool
= SPLITID
, seed
= seed
)
1995 cmd
= ( CFSD_PREFIX
+ "--force --op remove --pgid {pg} " ). format ( pg
= pg
, osd
= osd
)
1997 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
2001 # This is weird. The export files are based on only the EXPORT_PG
2002 # and where that pg was before the split. Use 'which' to use all
2003 # export copies in import.
2004 mydir
= os
. path
. join ( TESTDIR
, export_osds
[ which
])
2005 fname
= os
. path
. join ( mydir
, EXPORT_PG
)
2007 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= EXPORT_PG
, file = fname
)
2009 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
2011 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
2014 ERRORS
+= IMP_ERRORS
2016 # Start up again to make sure imports didn't corrupt anything
2018 print ( "Verify split import data" )
2019 data_errors
, count
= check_data ( DATADIR
, TMPFILE
, OSDDIR
, SPLIT_NAME
)
2020 ERRORS
+= data_errors
2021 if count
!= ( SPLIT_OBJ_COUNT
* SPLIT_NSPACE_COUNT
* pool_size
):
2022 logging
. error ( "Incorrect number of replicas seen {count} " . format ( count
= count
))
2027 call ( "/bin/rm -rf {dir} " . format ( dir = TESTDIR
), shell
= True )
2028 call ( "/bin/rm -rf {dir} " . format ( dir = DATADIR
), shell
= True )
2030 ERRORS
+= test_removeall ( CFSD_PREFIX
, db
, OBJREPPGS
, REP_POOL
, CEPH_BIN
, OSDDIR
, REP_NAME
, NUM_CLONED_REP_OBJECTS
)
2032 # vstart() starts 4 OSDs
2033 ERRORS
+= test_get_set_osdmap ( CFSD_PREFIX
, list ( range ( 4 )), ALLOSDS
)
2034 ERRORS
+= test_get_set_inc_osdmap ( CFSD_PREFIX
, ALLOSDS
[ 0 ])
2037 CORES
= [ f
for f
in os
. listdir ( CEPH_DIR
) if f
. startswith ( "core." )]
2039 CORE_DIR
= os
. path
. join ( "/tmp" , "cores. {pid} " . format ( pid
= os
. getpid ()))
2041 call ( "/bin/mv {ceph_dir} /core.* {core_dir} " . format ( ceph_dir
= CEPH_DIR
, core_dir
= CORE_DIR
), shell
= True )
2042 logging
. error ( "Failure due to cores found" )
2043 logging
. error ( "See {core_dir} for cores" . format ( core_dir
= CORE_DIR
))
2044 ERRORS
+= len ( CORES
)
2047 print ( "TEST PASSED" )
2050 print ( "TEST FAILED WITH {errcount} ERRORS" . format ( errcount
= ERRORS
))
def remove_btrfs_subvolumes(path):
    """Delete any btrfs subvolumes under *path* so a later ``rm -rf`` succeeds.

    Detects the filesystem type of *path* with ``stat -f -c '%T'``; when it is
    btrfs, removes every subvolume reported by ``btrfs subvolume list`` below
    *path* via ``sudo btrfs subvolume delete``.  No-op on FreeBSD, whose
    ``stat`` does not support the GNU ``-f -c`` syntax.

    NOTE(review): reconstructed from a line-mangled extraction in which
    original lines 2056 and 2066-2067 were dropped (`return` for the FreeBSD
    branch and the `if m:` guard around `found`) — confirm against upstream.
    """
    if platform.system() == "FreeBSD":
        return

    # Filesystem type name of the mount holding `path` (last line of output).
    # Initialize explicitly: the original left `filesystem` unbound (NameError)
    # if the stat pipe produced no output at all.
    filesystem = None
    result = subprocess.Popen("stat -f -c '%%T' %s" % path,
                              shell=True, stdout=subprocess.PIPE)
    for line in result.stdout:
        filesystem = decode(line).rstrip('\n')
    result.wait()  # reap the child so it does not linger as a zombie

    if filesystem == "btrfs":
        result = subprocess.Popen("sudo btrfs subvolume list %s" % path,
                                  shell=True, stdout=subprocess.PIPE)
        for line in result.stdout:
            # Field 8 of `btrfs subvolume list` output is the subvolume path.
            subvolume = decode(line).split()[8]
            # Extract the portion of the subvolume path relative to `path`.
            m = re.search(".*(%s.*)" % path, subvolume)
            if m:
                found = m.group(1)
                # NOTE(review): path is interpolated into a shell command;
                # acceptable for this test harness (paths are self-generated),
                # but not safe for untrusted input.
                call("sudo btrfs subvolume delete %s" % found, shell=True)
        result.wait()
2071 if __name__
== "__main__" :
2074 status
= main ( sys
. argv
[ 1 :])
2077 os
. chdir ( CEPH_BUILD_DIR
)
2078 remove_btrfs_subvolumes ( CEPH_DIR
)
2079 call ( "/bin/rm -fr {dir} " . format ( dir = CEPH_DIR
), shell
= True )