# Source: ceph/qa/standalone/special/ceph_objectstore_tool.py
# (extracted from the git.proxmox.com ceph.git mirror; web-scrape residue removed)
3 from __future__
import print_function
4 from subprocess
import call
6 from subprocess
import check_output
8 def check_output (* popenargs
, ** kwargs
):
10 # backported from python 2.7 stdlib
11 process
= subprocess
. Popen (
12 stdout
= subprocess
. PIPE
, * popenargs
, ** kwargs
)
13 output
, unused_err
= process
. communicate ()
14 retcode
= process
. poll ()
16 cmd
= kwargs
. get ( "args" )
19 error
= subprocess
. CalledProcessError ( retcode
, cmd
)
37 from subprocess
import DEVNULL
39 DEVNULL
= open ( os
. devnull
, "wb" )
41 logging
. basicConfig ( format
= ' %(levelname)s : %(message)s ' , level
= logging
. WARNING
,
45 if sys
. version_info
[ 0 ] >= 3 :
47 return s
. decode ( 'utf-8' )
def check_output(*args, **kwargs):  # noqa
    """Python 3 shim: run subprocess.check_output and return its output
    decoded to str (via the module's decode() helper) instead of bytes."""
    raw = subprocess.check_output(*args, **kwargs)
    return decode(raw)
57 def wait_for_health ():
58 print ( "Wait for health_ok..." , end
= "" )
60 while call ( " {path} /ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null" . format ( path
= CEPH_BIN
), shell
= True ) == 0 :
63 raise Exception ( "Time exceeded to go to health" )
def get_pool_id(name, nullfd):
    """Return the id of the pool *name* by parsing 'ceph osd pool stats'.

    nullfd is an open writable fd used to discard the command's stderr.
    """
    stats_cmd = "{path}/ceph osd pool stats {pool}".format(
        pool=name, path=CEPH_BIN)
    # First output line reads "pool <name> id <N> ..." -- the id is field 4
    fields = check_output(stats_cmd.split(), stderr=nullfd).split()
    return fields[3]
74 # return a list of unique PGS given an osd subdirectory
75 def get_osd_pgs ( SUBDIR
, ID
):
78 endhead
= re
. compile ( " {id} .*_head$" . format ( id = ID
))
79 DIR
= os
. path
. join ( SUBDIR
, "current" )
80 PGS
+= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and ( ID
is None or endhead
. match ( f
))]
81 PGS
= [ re
. sub ( "_head" , "" , p
) for p
in PGS
if "_head" in p
]
85 # return a sorted list of unique PGs given a directory
87 OSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
90 SUBDIR
= os
. path
. join ( DIR
, d
)
91 PGS
+= get_osd_pgs ( SUBDIR
, ID
)
92 return sorted ( set ( PGS
))
95 # return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified
96 def get_objs ( ALLPGS
, prefix
, DIR
, ID
):
97 OSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
100 DIRL2
= os
. path
. join ( DIR
, d
)
101 SUBDIR
= os
. path
. join ( DIRL2
, "current" )
104 if not os
. path
. isdir ( os
. path
. join ( SUBDIR
, PGDIR
)):
106 FINALDIR
= os
. path
. join ( SUBDIR
, PGDIR
)
107 # See if there are any objects there
108 if any ( f
for f
in [ val
for _
, _
, fl
in os
. walk ( FINALDIR
) for val
in fl
] if f
. startswith ( prefix
)):
110 return sorted ( set ( PGS
))
113 # return a sorted list of OSDS which have data from a given PG
114 def get_osds ( PG
, DIR
):
115 ALLOSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
118 DIRL2
= os
. path
. join ( DIR
, d
)
119 SUBDIR
= os
. path
. join ( DIRL2
, "current" )
121 if not os
. path
. isdir ( os
. path
. join ( SUBDIR
, PGDIR
)):
127 def get_lines ( filename
):
128 tmpfd
= open ( filename
, "r" )
132 line
= tmpfd
. readline (). rstrip ( ' \n ' )
140 def cat_file ( level
, filename
):
141 if level
< logging
. getLogger (). getEffectiveLevel ():
143 print ( "File: " + filename
)
144 with
open ( filename
, "r" ) as f
:
146 line
= f
. readline (). rstrip ( ' \n ' )
153 def vstart ( new
, opt
= "-o osd_pool_default_pg_autoscale_mode=off" ):
154 print ( "vstarting...." , end
= "" )
155 NEW
= new
and "-n" or "-k"
156 call ( "MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 MGR_PYTHON_PATH= {path} /src/pybind/mgr {path} /src/vstart.sh --filestore --short -l {new} -d {opt} > /dev/null 2>&1" . format ( new
= NEW
, opt
= opt
, path
= CEPH_ROOT
), shell
= True )
160 def test_failure ( cmd
, errmsg
, tty
= False ):
163 ttyfd
= open ( "/dev/tty" , "rwb" )
164 except Exception as e
:
166 logging
. info ( "SKIP " + cmd
)
168 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
169 tmpfd
= open ( TMPFILE
, "wb" )
173 ret
= call ( cmd
, shell
= True , stdin
= ttyfd
, stdout
= ttyfd
, stderr
= tmpfd
)
176 ret
= call ( cmd
, shell
= True , stderr
= tmpfd
)
180 logging
. error ( "Should have failed, but got exit 0" )
182 lines
= get_lines ( TMPFILE
)
183 matched
= [ l
for l
in lines
if errmsg
in l
]
185 logging
. info ( "Correctly failed with message \" " + matched
[ 0 ] + " \" " )
188 logging
. error ( "Command: " + cmd
)
189 logging
. error ( "Bad messages to stderr \" " + str ( lines
) + " \" " )
190 logging
. error ( "Expected \" " + errmsg
+ " \" " )
197 return "ns {num} " . format ( num
= num
)
200 def verify ( DATADIR
, POOL
, NAME_PREFIX
, db
):
201 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
203 for rawnsfile
in [ f
for f
in os
. listdir ( DATADIR
) if f
. split ( '-' )[ 1 ]. find ( NAME_PREFIX
) == 0 ]:
204 nsfile
= rawnsfile
. split ( "__" )[ 0 ]
205 clone
= rawnsfile
. split ( "__" )[ 1 ]
206 nspace
= nsfile
. split ( "-" )[ 0 ]
207 file = nsfile
. split ( "-" )[ 1 ]
211 path
= os
. path
. join ( DATADIR
, rawnsfile
)
216 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' get {file} {out} " . format ( pool
= POOL
, file = file , out
= TMPFILE
, nspace
= nspace
, path
= CEPH_BIN
)
218 call ( cmd
, shell
= True , stdout
= DEVNULL
, stderr
= DEVNULL
)
219 cmd
= "diff -q {src} {result} " . format ( src
= path
, result
= TMPFILE
)
221 ret
= call ( cmd
, shell
= True )
223 logging
. error ( " {file} data not imported properly" . format ( file = file ))
229 for key
, val
in db
[ nspace
][ file ][ "xattr" ]. items ():
230 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getxattr {name} {key} " . format ( pool
= POOL
, name
= file , key
= key
, nspace
= nspace
, path
= CEPH_BIN
)
232 getval
= check_output ( cmd
, shell
= True , stderr
= DEVNULL
)
233 logging
. debug ( "getxattr {key} {val} " . format ( key
= key
, val
= getval
))
235 logging
. error ( "getxattr of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= key
, get
= getval
, orig
= val
))
238 hdr
= db
[ nspace
][ file ]. get ( "omapheader" , "" )
239 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getomapheader {name} {file} " . format ( pool
= POOL
, name
= file , nspace
= nspace
, file = TMPFILE
, path
= CEPH_BIN
)
241 ret
= call ( cmd
, shell
= True , stderr
= DEVNULL
)
243 logging
. error ( "rados getomapheader returned {ret} " . format ( ret
= ret
))
246 getlines
= get_lines ( TMPFILE
)
247 assert ( len ( getlines
) == 0 or len ( getlines
) == 1 )
248 if len ( getlines
) == 0 :
252 logging
. debug ( "header: {hdr} " . format ( hdr
= gethdr
))
254 logging
. error ( "getomapheader returned wrong val: {get} instead of {orig} " . format ( get
= gethdr
, orig
= hdr
))
256 for key
, val
in db
[ nspace
][ file ][ "omap" ]. items ():
257 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getomapval {name} {key} {file} " . format ( pool
= POOL
, name
= file , key
= key
, nspace
= nspace
, file = TMPFILE
, path
= CEPH_BIN
)
259 ret
= call ( cmd
, shell
= True , stderr
= DEVNULL
)
261 logging
. error ( "getomapval returned {ret} " . format ( ret
= ret
))
264 getlines
= get_lines ( TMPFILE
)
265 if len ( getlines
) != 1 :
266 logging
. error ( "Bad data from getomapval {lines} " . format ( lines
= getlines
))
270 logging
. debug ( "getomapval {key} {val} " . format ( key
= key
, val
= getval
))
272 logging
. error ( "getomapval returned wrong val: {get} instead of {orig} " . format ( get
= getval
, orig
= val
))
281 def check_journal ( jsondict
):
283 if 'header' not in jsondict
:
284 logging
. error ( "Key 'header' not in dump-journal" )
286 elif 'max_size' not in jsondict
[ 'header' ]:
287 logging
. error ( "Key 'max_size' not in dump-journal header" )
290 print ( " \t Journal max_size = {size} " . format ( size
= jsondict
[ 'header' ][ 'max_size' ]))
291 if 'entries' not in jsondict
:
292 logging
. error ( "Key 'entries' not in dump-journal output" )
294 elif len ( jsondict
[ 'entries' ]) == 0 :
295 logging
. info ( "No entries in journal found" )
297 errors
+= check_journal_entries ( jsondict
[ 'entries' ])
301 def check_journal_entries ( entries
):
303 for enum
in range ( len ( entries
)):
304 if 'offset' not in entries
[ enum
]:
305 logging
. error ( "No 'offset' key in entry {e} " . format ( e
= enum
))
307 if 'seq' not in entries
[ enum
]:
308 logging
. error ( "No 'seq' key in entry {e} " . format ( e
= enum
))
310 if 'transactions' not in entries
[ enum
]:
311 logging
. error ( "No 'transactions' key in entry {e} " . format ( e
= enum
))
313 elif len ( entries
[ enum
][ 'transactions' ]) == 0 :
314 logging
. error ( "No transactions found in entry {e} " . format ( e
= enum
))
317 errors
+= check_entry_transactions ( entries
[ enum
], enum
)
321 def check_entry_transactions ( entry
, enum
):
323 for tnum
in range ( len ( entry
[ 'transactions' ])):
324 if 'trans_num' not in entry
[ 'transactions' ][ tnum
]:
325 logging
. error ( "Key 'trans_num' missing from entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
327 elif entry
[ 'transactions' ][ tnum
][ 'trans_num' ] != tnum
:
328 ft
= entry
[ 'transactions' ][ tnum
][ 'trans_num' ]
329 logging
. error ( "Bad trans_num ( {ft} ) entry {e} trans {t} " . format ( ft
= ft
, e
= enum
, t
= tnum
))
331 if 'ops' not in entry
[ 'transactions' ][ tnum
]:
332 logging
. error ( "Key 'ops' missing from entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
335 errors
+= check_transaction_ops ( entry
[ 'transactions' ][ tnum
][ 'ops' ], enum
, tnum
)
339 def check_transaction_ops ( ops
, enum
, tnum
):
341 logging
. warning ( "No ops found in entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
343 for onum
in range ( len ( ops
)):
344 if 'op_num' not in ops
[ onum
]:
345 logging
. error ( "Key 'op_num' missing from entry {e} trans {t} op {o} " . format ( e
= enum
, t
= tnum
, o
= onum
))
347 elif ops
[ onum
][ 'op_num' ] != onum
:
348 fo
= ops
[ onum
][ 'op_num' ]
349 logging
. error ( "Bad op_num ( {fo} ) from entry {e} trans {t} op {o} " . format ( fo
= fo
, e
= enum
, t
= tnum
, o
= onum
))
351 if 'op_name' not in ops
[ onum
]:
352 logging
. error ( "Key 'op_name' missing from entry {e} trans {t} op {o} " . format ( e
= enum
, t
= tnum
, o
= onum
))
357 def test_dump_journal ( CFSD_PREFIX
, osds
):
360 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= pid
)
363 # Test --op dump-journal by loading json
364 cmd
= ( CFSD_PREFIX
+ "--op dump-journal --format json" ). format ( osd
= osd
)
366 tmpfd
= open ( TMPFILE
, "wb" )
367 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
369 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
373 tmpfd
= open ( TMPFILE
, "r" )
374 jsondict
= json
. load ( tmpfd
)
378 journal_errors
= check_journal ( jsondict
)
379 if journal_errors
!= 0 :
380 logging
. error ( jsondict
)
381 ERRORS
+= journal_errors
385 CEPH_BUILD_DIR
= os
. environ
. get ( 'CEPH_BUILD_DIR' )
386 CEPH_BIN
= os
. environ
. get ( 'CEPH_BIN' )
387 CEPH_ROOT
= os
. environ
. get ( 'CEPH_ROOT' )
389 if not CEPH_BUILD_DIR
:
390 CEPH_BUILD_DIR
= os
. getcwd ()
391 os
. putenv ( 'CEPH_BUILD_DIR' , CEPH_BUILD_DIR
)
392 CEPH_BIN
= os
. path
. join ( CEPH_BUILD_DIR
, 'bin' )
393 os
. putenv ( 'CEPH_BIN' , CEPH_BIN
)
394 CEPH_ROOT
= os
. path
. dirname ( CEPH_BUILD_DIR
)
395 os
. putenv ( 'CEPH_ROOT' , CEPH_ROOT
)
396 CEPH_LIB
= os
. path
. join ( CEPH_BUILD_DIR
, 'lib' )
397 os
. putenv ( 'CEPH_LIB' , CEPH_LIB
)
402 pass # ok if this is already there
403 CEPH_DIR
= os
. path
. join ( CEPH_BUILD_DIR
, os
. path
. join ( "td" , "cot_dir" ))
404 CEPH_CONF
= os
. path
. join ( CEPH_DIR
, 'ceph.conf' )
407 call ( " {path} /init-ceph -c {conf} stop > /dev/null 2>&1" . format ( conf
= CEPH_CONF
, path
= CEPH_BIN
), shell
= True )
410 def check_data ( DATADIR
, TMPFILE
, OSDDIR
, SPLIT_NAME
):
413 for rawnsfile
in [ f
for f
in os
. listdir ( DATADIR
) if f
. split ( '-' )[ 1 ]. find ( SPLIT_NAME
) == 0 ]:
414 nsfile
= rawnsfile
. split ( "__" )[ 0 ]
415 clone
= rawnsfile
. split ( "__" )[ 1 ]
416 nspace
= nsfile
. split ( "-" )[ 0 ]
417 file = nsfile
. split ( "-" )[ 1 ] + "__" + clone
421 path
= os
. path
. join ( DATADIR
, rawnsfile
)
422 tmpfd
= open ( TMPFILE
, "wb" )
423 cmd
= "find {dir} -name ' {file} _*_ {nspace} _*'" . format ( dir = OSDDIR
, file = file , nspace
= nspace
)
425 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
427 logging
. critical ( "INTERNAL ERROR" )
430 obj_locs
= get_lines ( TMPFILE
)
431 if len ( obj_locs
) == 0 :
432 logging
. error ( "Can't find imported object {name} " . format ( name
= file ))
434 for obj_loc
in obj_locs
:
435 # For btrfs skip snap_* dirs
436 if re
. search ( "/snap_[0-9]*/" , obj_loc
) is not None :
439 cmd
= "diff -q {src} {obj_loc} " . format ( src
= path
, obj_loc
= obj_loc
)
441 ret
= call ( cmd
, shell
= True )
443 logging
. error ( " {file} data not imported properly into {obj} " . format ( file = file , obj
= obj_loc
))
445 return ERRORS
, repcount
448 def set_osd_weight ( CFSD_PREFIX
, osd_ids
, osd_path
, weight
):
449 # change the weight of osd.0 to math.pi in the newest osdmap of given osd
450 osdmap_file
= tempfile
. NamedTemporaryFile ( delete
= True )
451 cmd
= ( CFSD_PREFIX
+ "--op get-osdmap --file {osdmap_file} " ). format ( osd
= osd_path
,
452 osdmap_file
= osdmap_file
. name
)
453 output
= check_output ( cmd
, shell
= True )
454 epoch
= int ( re
. findall ( '#(\d+)' , output
)[ 0 ])
456 new_crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
457 old_crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
458 ret
= call ( " {path} /osdmaptool --export-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
459 crush_file
= old_crush_file
. name
, path
= CEPH_BIN
),
465 for osd_id
in osd_ids
:
466 cmd
= " {path} /crushtool -i {crush_file} --reweight-item osd. {osd} {weight} -o {new_crush_file} " . format ( osd
= osd_id
,
467 crush_file
= old_crush_file
. name
,
469 new_crush_file
= new_crush_file
. name
, path
= CEPH_BIN
)
470 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
472 old_crush_file
, new_crush_file
= new_crush_file
, old_crush_file
474 # change them back, since we don't need to preapre for another round
475 old_crush_file
, new_crush_file
= new_crush_file
, old_crush_file
476 old_crush_file
. close ()
478 ret
= call ( " {path} /osdmaptool --import-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
479 crush_file
= new_crush_file
. name
, path
= CEPH_BIN
),
485 # Minimum test of --dry-run by using it, but not checking anything
486 cmd
= CFSD_PREFIX
+ "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
487 cmd
= cmd
. format ( osd
= osd_path
, osdmap_file
= osdmap_file
. name
, epoch
= epoch
)
488 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
491 # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool
492 # to use use a different epoch than the one in osdmap
493 cmd
= CFSD_PREFIX
+ "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
494 cmd
= cmd
. format ( osd
= osd_path
, osdmap_file
= osdmap_file
. name
, epoch
= epoch
)
495 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
499 def get_osd_weights ( CFSD_PREFIX
, osd_ids
, osd_path
):
500 osdmap_file
= tempfile
. NamedTemporaryFile ( delete
= True )
501 cmd
= ( CFSD_PREFIX
+ "--op get-osdmap --file {osdmap_file} " ). format ( osd
= osd_path
,
502 osdmap_file
= osdmap_file
. name
)
503 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
506 # we have to read the weights from the crush map, even we can query the weights using
507 # osdmaptool, but please keep in mind, they are different:
508 # item weights in crush map versus weight associated with each osd in osdmap
509 crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
510 ret
= call ( " {path} /osdmaptool --export-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
511 crush_file
= crush_file
. name
, path
= CEPH_BIN
),
515 output
= check_output ( " {path} /crushtool --tree -i {crush_file} | tail -n {num_osd} " . format ( crush_file
= crush_file
. name
,
516 num_osd
= len ( osd_ids
), path
= CEPH_BIN
),
520 for line
in output
. strip (). split ( ' \n ' ):
522 linev
= re
. split ( '\s+' , line
)
525 print ( 'linev %s ' % linev
)
526 weights
. append ( float ( linev
[ 2 ]))
531 def test_get_set_osdmap ( CFSD_PREFIX
, osd_ids
, osd_paths
):
532 print ( "Testing get-osdmap and set-osdmap" )
535 weight
= 1 / math
. e
# just some magic number in [0, 1]
537 for osd_path
in osd_paths
:
538 if set_osd_weight ( CFSD_PREFIX
, osd_ids
, osd_path
, weight
):
539 changed
. append ( osd_path
)
541 logging
. warning ( "Failed to change the weights: {0} " . format ( osd_path
))
542 # i am pissed off if none of the store gets changed
546 for osd_path
in changed
:
547 weights
= get_osd_weights ( CFSD_PREFIX
, osd_ids
, osd_path
)
551 if any ( abs ( w
- weight
) > 1e-5 for w
in weights
):
552 logging
. warning ( "Weight is not changed: {0} != {1} " . format ( weights
, weight
))
556 def test_get_set_inc_osdmap ( CFSD_PREFIX
, osd_path
):
557 # incrementals are not used unless we need to build an MOSDMap to update
558 # OSD's peers, so an obvious way to test it is simply overwrite an epoch
559 # with a different copy, and read it back to see if it matches.
561 file_e2
= tempfile
. NamedTemporaryFile ( delete
= True )
562 cmd
= ( CFSD_PREFIX
+ "--op get-inc-osdmap --file {file} " ). format ( osd
= osd_path
,
564 output
= check_output ( cmd
, shell
= True )
565 epoch
= int ( re
. findall ( '#(\d+)' , output
)[ 0 ])
566 # backup e1 incremental before overwriting it
568 file_e1_backup
= tempfile
. NamedTemporaryFile ( delete
= True )
569 cmd
= CFSD_PREFIX
+ "--op get-inc-osdmap --epoch {epoch} --file {file} "
570 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
572 # overwrite e1 with e2
573 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --force --epoch {epoch} --file {file} "
574 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e2
. name
), shell
= True )
576 # Use dry-run to set back to e1 which shouldn't happen
577 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file} "
578 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
581 file_e1_read
= tempfile
. NamedTemporaryFile ( delete
= True )
582 cmd
= CFSD_PREFIX
+ "--op get-inc-osdmap --epoch {epoch} --file {file} "
583 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_read
. name
), shell
= True )
587 if not filecmp
. cmp ( file_e2
. name
, file_e1_read
. name
, shallow
= False ):
588 logging
. error ( "{{get,set}}-inc-osdmap mismatch {0} != {1} " . format ( file_e2
. name
, file_e1_read
. name
))
591 # revert the change with file_e1_backup
592 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --epoch {epoch} --file {file} "
593 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
595 logging
. error ( "Failed to revert the changed inc-osdmap" )
601 def test_removeall ( CFSD_PREFIX
, db
, OBJREPPGS
, REP_POOL
, CEPH_BIN
, OSDDIR
, REP_NAME
, NUM_CLONED_REP_OBJECTS
):
603 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
604 nullfd
= open ( os
. devnull
, "w" )
606 print ( "Test removeall" )
608 test_force_remove
= 0
609 for nspace
in db
. keys ():
610 for basename
in db
[ nspace
]. keys ():
611 JSON
= db
[ nspace
][ basename
][ 'json' ]
613 OSDS
= get_osds ( pg
, OSDDIR
)
615 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
616 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
617 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
621 if int ( basename
. split ( REP_NAME
)[ 1 ]) <= int ( NUM_CLONED_REP_OBJECTS
):
622 cmd
= ( CFSD_PREFIX
+ "' {json} ' remove" ). format ( osd
= osd
, json
= JSON
)
623 errors
+= test_failure ( cmd
, "Clones are present, use removeall to delete everything" )
624 if not test_force_remove
:
626 cmd
= ( CFSD_PREFIX
+ " ' {json} ' set-attr snapset /dev/null" ). format ( osd
= osd
, json
= JSON
)
628 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
630 logging
. error ( "Test set-up to corrupt snapset failed for {json} " . format ( json
= JSON
))
632 # Do the removeall since this test failed to set-up
634 test_force_remove
= 1
636 cmd
= ( CFSD_PREFIX
+ " ' {json} ' --force remove" ). format ( osd
= osd
, json
= JSON
)
638 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
640 logging
. error ( "forced remove with corrupt snapset failed for {json} " . format ( json
= JSON
))
644 cmd
= ( CFSD_PREFIX
+ " --force --dry-run ' {json} ' remove" ). format ( osd
= osd
, json
= JSON
)
646 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
648 logging
. error ( "remove with --force failed for {json} " . format ( json
= JSON
))
651 cmd
= ( CFSD_PREFIX
+ " --dry-run ' {json} ' removeall" ). format ( osd
= osd
, json
= JSON
)
653 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
655 logging
. error ( "removeall failed for {json} " . format ( json
= JSON
))
658 cmd
= ( CFSD_PREFIX
+ " ' {json} ' removeall" ). format ( osd
= osd
, json
= JSON
)
660 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
662 logging
. error ( "removeall failed for {json} " . format ( json
= JSON
))
665 tmpfd
= open ( TMPFILE
, "w" )
666 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} --namespace {ns} {name} " ). format ( osd
= osd
, pg
= pg
, ns
= nspace
, name
= basename
)
668 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
670 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
673 lines
= get_lines ( TMPFILE
)
675 logging
. error ( "Removeall didn't remove all objects {ns} / {name} : {lines} " . format ( ns
= nspace
, name
= basename
, lines
= lines
))
679 cmd
= " {path} /rados -p {pool} rmsnap snap1" . format ( pool
= REP_POOL
, path
= CEPH_BIN
)
681 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
683 logging
. error ( "rados rmsnap failed" )
691 if sys
. version_info
[ 0 ] < 3 :
692 sys
. stdout
= stdout
= os
. fdopen ( sys
. stdout
. fileno (), 'wb' , 0 )
694 stdout
= sys
. stdout
. buffer
695 if len ( argv
) > 1 and argv
[ 1 ] == "debug" :
700 call ( "rm -fr {dir} ; mkdir -p {dir} " . format ( dir = CEPH_DIR
), shell
= True )
702 os
. environ
[ "CEPH_DIR" ] = CEPH_DIR
704 REP_POOL
= "rep_pool"
705 REP_NAME
= "REPobject"
708 if len ( argv
) > 0 and argv
[ 0 ] == 'large' :
710 NUM_REP_OBJECTS
= 200
711 NUM_CLONED_REP_OBJECTS
= 50
714 # Larger data sets for first object per namespace
715 DATALINECOUNT
= 50000
716 # Number of objects to do xattr/omap testing on
721 NUM_CLONED_REP_OBJECTS
= 2
724 # Larger data sets for first object per namespace
726 # Number of objects to do xattr/omap testing on
730 TESTDIR
= "/tmp/test. {pid} " . format ( pid
= pid
)
731 DATADIR
= "/tmp/data. {pid} " . format ( pid
= pid
)
732 CFSD_PREFIX
= CEPH_BIN
+ "/ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR
+ "/ {osd} "
733 PROFNAME
= "testecprofile"
735 os
. environ
[ 'CEPH_CONF' ] = CEPH_CONF
739 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} replicated" . format ( pool
= REP_POOL
, pg
= PG_COUNT
, path
= CEPH_BIN
)
741 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
743 REPID
= get_pool_id ( REP_POOL
, nullfd
)
745 print ( "Created Replicated pool # {repid} " . format ( repid
= REPID
))
747 cmd
= " {path} /ceph osd erasure-code-profile set {prof} crush-failure-domain=osd" . format ( prof
= PROFNAME
, path
= CEPH_BIN
)
749 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
750 cmd
= " {path} /ceph osd erasure-code-profile get {prof} " . format ( prof
= PROFNAME
, path
= CEPH_BIN
)
752 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
753 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} erasure {prof} " . format ( pool
= EC_POOL
, prof
= PROFNAME
, pg
= PG_COUNT
, path
= CEPH_BIN
)
755 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
756 ECID
= get_pool_id ( EC_POOL
, nullfd
)
758 print ( "Created Erasure coded pool # {ecid} " . format ( ecid
= ECID
))
760 print ( "Creating {objs} objects in replicated pool" . format ( objs
=( NUM_REP_OBJECTS
* NUM_NSPACES
)))
761 cmd
= "mkdir -p {datadir} " . format ( datadir
= DATADIR
)
763 call ( cmd
, shell
= True )
767 objects
= range ( 1 , NUM_REP_OBJECTS
+ 1 )
768 nspaces
= range ( NUM_NSPACES
)
770 nspace
= get_nspace ( n
)
775 NAME
= REP_NAME
+ " {num} " . format ( num
= i
)
776 LNAME
= nspace
+ "-" + NAME
777 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
780 cmd
= "rm -f " + DDNAME
782 call ( cmd
, shell
= True )
785 dataline
= range ( DATALINECOUNT
)
788 fd
= open ( DDNAME
, "w" )
789 data
= "This is the replicated data for " + LNAME
+ " \n "
794 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= REP_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
796 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
798 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
801 db
[ nspace
][ NAME
] = {}
803 if i
< ATTR_OBJS
+ 1 :
807 db
[ nspace
][ NAME
][ "xattr" ] = {}
811 mykey
= "key {i} - {k} " . format ( i
= i
, k
= k
)
812 myval
= "val {i} - {k} " . format ( i
= i
, k
= k
)
813 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setxattr {name} {key} {val} " . format ( pool
= REP_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
815 ret
= call ( cmd
, shell
= True )
817 logging
. error ( "setxattr failed with {ret} " . format ( ret
= ret
))
819 db
[ nspace
][ NAME
][ "xattr" ][ mykey
] = myval
821 # Create omap header in all objects but REPobject1
822 if i
< ATTR_OBJS
+ 1 and i
!= 1 :
823 myhdr
= "hdr {i} " . format ( i
= i
)
824 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setomapheader {name} {hdr} " . format ( pool
= REP_POOL
, name
= NAME
, hdr
= myhdr
, nspace
= nspace
, path
= CEPH_BIN
)
826 ret
= call ( cmd
, shell
= True )
828 logging
. critical ( "setomapheader failed with {ret} " . format ( ret
= ret
))
830 db
[ nspace
][ NAME
][ "omapheader" ] = myhdr
832 db
[ nspace
][ NAME
][ "omap" ] = {}
836 mykey
= "okey {i} - {k} " . format ( i
= i
, k
= k
)
837 myval
= "oval {i} - {k} " . format ( i
= i
, k
= k
)
838 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setomapval {name} {key} {val} " . format ( pool
= REP_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
840 ret
= call ( cmd
, shell
= True )
842 logging
. critical ( "setomapval failed with {ret} " . format ( ret
= ret
))
843 db
[ nspace
][ NAME
][ "omap" ][ mykey
] = myval
846 cmd
= " {path} /rados -p {pool} mksnap snap1" . format ( pool
= REP_POOL
, path
= CEPH_BIN
)
848 call ( cmd
, shell
= True )
850 objects
= range ( 1 , NUM_CLONED_REP_OBJECTS
+ 1 )
851 nspaces
= range ( NUM_NSPACES
)
853 nspace
= get_nspace ( n
)
856 NAME
= REP_NAME
+ " {num} " . format ( num
= i
)
857 LNAME
= nspace
+ "-" + NAME
858 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
860 CLONENAME
= DDNAME
+ "__1"
863 cmd
= "mv -f " + DDNAME
+ " " + CLONENAME
865 call ( cmd
, shell
= True )
868 dataline
= range ( DATALINECOUNT
)
871 fd
= open ( DDNAME
, "w" )
872 data
= "This is the replicated data after a snapshot for " + LNAME
+ " \n "
877 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= REP_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
879 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
881 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
884 print ( "Creating {objs} objects in erasure coded pool" . format ( objs
=( NUM_EC_OBJECTS
* NUM_NSPACES
)))
886 objects
= range ( 1 , NUM_EC_OBJECTS
+ 1 )
887 nspaces
= range ( NUM_NSPACES
)
889 nspace
= get_nspace ( n
)
892 NAME
= EC_NAME
+ " {num} " . format ( num
= i
)
893 LNAME
= nspace
+ "-" + NAME
894 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
897 cmd
= "rm -f " + DDNAME
899 call ( cmd
, shell
= True )
902 dataline
= range ( DATALINECOUNT
)
905 fd
= open ( DDNAME
, "w" )
906 data
= "This is the erasure coded data for " + LNAME
+ " \n "
911 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= EC_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
913 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
915 logging
. critical ( "Erasure coded pool creation failed with {ret} " . format ( ret
= ret
))
918 db
[ nspace
][ NAME
] = {}
920 db
[ nspace
][ NAME
][ "xattr" ] = {}
921 if i
< ATTR_OBJS
+ 1 :
928 mykey
= "key {i} - {k} " . format ( i
= i
, k
= k
)
929 myval
= "val {i} - {k} " . format ( i
= i
, k
= k
)
930 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setxattr {name} {key} {val} " . format ( pool
= EC_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
932 ret
= call ( cmd
, shell
= True )
934 logging
. error ( "setxattr failed with {ret} " . format ( ret
= ret
))
936 db
[ nspace
][ NAME
][ "xattr" ][ mykey
] = myval
938 # Omap isn't supported in EC pools
939 db
[ nspace
][ NAME
][ "omap" ] = {}
946 logging
. critical ( "Unable to set up test" )
949 ALLREPPGS
= get_pgs ( OSDDIR
, REPID
)
950 logging
. debug ( ALLREPPGS
)
951 ALLECPGS
= get_pgs ( OSDDIR
, ECID
)
952 logging
. debug ( ALLECPGS
)
954 OBJREPPGS
= get_objs ( ALLREPPGS
, REP_NAME
, OSDDIR
, REPID
)
955 logging
. debug ( OBJREPPGS
)
956 OBJECPGS
= get_objs ( ALLECPGS
, EC_NAME
, OSDDIR
, ECID
)
957 logging
. debug ( OBJECPGS
)
961 osds
= get_osds ( ONEPG
, OSDDIR
)
963 logging
. debug ( ONEOSD
)
965 print ( "Test invalid parameters" )
966 # On export can't use stdout to a terminal
967 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} " ). format ( osd
= ONEOSD
, pg
= ONEPG
)
968 ERRORS
+= test_failure ( cmd
, "stdout is a tty and no --file filename specified" , tty
= True )
970 # On export can't use stdout to a terminal
971 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file -" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
972 ERRORS
+= test_failure ( cmd
, "stdout is a tty and no --file filename specified" , tty
= True )
974 # Prep a valid ec export file for import failure tests
975 ONEECPG
= ALLECPGS
[ 0 ]
976 osds
= get_osds ( ONEECPG
, OSDDIR
)
978 OTHERFILE
= "/tmp/foo. {pid} " . format ( pid
= pid
)
979 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= ONEECOSD
, pg
= ONEECPG
, file = OTHERFILE
)
981 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
985 # Prep a valid export file for import failure tests
986 OTHERFILE
= "/tmp/foo. {pid} " . format ( pid
= pid
)
987 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= ONEOSD
, pg
= ONEPG
, file = OTHERFILE
)
989 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
991 # On import can't specify a different pgid than the file
992 TMPPG
= " {pool} .80" . format ( pool
= REPID
)
993 cmd
= ( CFSD_PREFIX
+ "--op import --pgid 12.dd --file {file} " ). format ( osd
= ONEOSD
, pg
= TMPPG
, file = OTHERFILE
)
994 ERRORS
+= test_failure ( cmd
, "specified pgid 12.dd does not match actual pgid" )
997 cmd
= ( CFSD_PREFIX
+ "--op import --file {FOO} " ). format ( osd
= ONEOSD
, FOO
= OTHERFILE
)
998 ERRORS
+= test_failure ( cmd
, "file: {FOO} : No such file or directory" . format ( FOO
= OTHERFILE
))
1000 cmd
= " {path} /ceph-objectstore-tool --no-mon-config --data-path BAD_DATA_PATH --op list" . format ( osd
= ONEOSD
, path
= CEPH_BIN
)
1001 ERRORS
+= test_failure ( cmd
, "data-path: BAD_DATA_PATH: No such file or directory" )
1003 cmd
= ( CFSD_PREFIX
+ "--journal-path BAD_JOURNAL_PATH --op list" ). format ( osd
= ONEOSD
)
1004 ERRORS
+= test_failure ( cmd
, "journal-path: BAD_JOURNAL_PATH: No such file or directory" )
1006 cmd
= ( CFSD_PREFIX
+ "--journal-path /bin --op list" ). format ( osd
= ONEOSD
)
1007 ERRORS
+= test_failure ( cmd
, "journal-path: /bin: (21) Is a directory" )
1009 # On import can't use stdin from a terminal
1010 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} " ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1011 ERRORS
+= test_failure ( cmd
, "stdin is a tty and no --file filename specified" , tty
= True )
1013 # On import can't use stdin from a terminal
1014 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file -" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1015 ERRORS
+= test_failure ( cmd
, "stdin is a tty and no --file filename specified" , tty
= True )
1017 # Specify a bad --type
1018 os
. mkdir ( OSDDIR
+ "/fakeosd" )
1019 cmd
= ( " {path} /ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR
+ "/ {osd} --type foobar --op list --pgid {pg} " ). format ( osd
= "fakeosd" , pg
= ONEPG
, path
= CEPH_BIN
)
1020 ERRORS
+= test_failure ( cmd
, "Unable to create store of type foobar" )
1022 # Don't specify a data-path
1023 cmd
= " {path} /ceph-objectstore-tool --no-mon-config --type memstore --op list --pgid {pg} " . format ( dir = OSDDIR
, osd
= ONEOSD
, pg
= ONEPG
, path
= CEPH_BIN
)
1024 ERRORS
+= test_failure ( cmd
, "Must provide --data-path" )
1026 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid 2.0" ). format ( osd
= ONEOSD
)
1027 ERRORS
+= test_failure ( cmd
, "Please use export-remove or you must use --force option" )
1029 cmd
= ( CFSD_PREFIX
+ "--force --op remove" ). format ( osd
= ONEOSD
)
1030 ERRORS
+= test_failure ( cmd
, "Must provide pgid" )
1032 # Don't secify a --op nor object command
1033 cmd
= CFSD_PREFIX
. format ( osd
= ONEOSD
)
1034 ERRORS
+= test_failure ( cmd
, "Must provide --op or object command..." )
1036 # Specify a bad --op command
1037 cmd
= ( CFSD_PREFIX
+ "--op oops" ). format ( osd
= ONEOSD
)
1038 ERRORS
+= test_failure ( cmd
, "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, dump-export, trim-pg-log)" )
1040 # Provide just the object param not a command
1041 cmd
= ( CFSD_PREFIX
+ "object" ). format ( osd
= ONEOSD
)
1042 ERRORS
+= test_failure ( cmd
, "Invalid syntax, missing command" )
1044 # Provide an object name that doesn't exist
1045 cmd
= ( CFSD_PREFIX
+ "NON_OBJECT get-bytes" ). format ( osd
= ONEOSD
)
1046 ERRORS
+= test_failure ( cmd
, "No object id 'NON_OBJECT' found" )
1048 # Provide an invalid object command
1049 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} '' notacommand" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1050 ERRORS
+= test_failure ( cmd
, "Unknown object command 'notacommand'" )
1052 cmd
= ( CFSD_PREFIX
+ "foo list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1053 ERRORS
+= test_failure ( cmd
, "No object id 'foo' found or invalid JSON specified" )
1055 cmd
= ( CFSD_PREFIX
+ "'{{ \" oid \" : \" obj4 \" , \" key \" : \"\" , \" snapid \" :-1, \" hash \" :2826278768, \" max \" :0, \" pool \" :1, \" namespace \" : \"\" }}' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1056 ERRORS
+= test_failure ( cmd
, "Without --pgid the object '{ \" oid \" : \" obj4 \" , \" key \" : \"\" , \" snapid \" :-1, \" hash \" :2826278768, \" max \" :0, \" pool \" :1, \" namespace \" : \"\" }' must be a JSON array" )
1058 cmd
= ( CFSD_PREFIX
+ "'[]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1059 ERRORS
+= test_failure ( cmd
, "Object '[]' must be a JSON array with 2 elements" )
1061 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.0 \" ]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1062 ERRORS
+= test_failure ( cmd
, "Object '[ \" 1.0 \" ]' must be a JSON array with 2 elements" )
1064 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.0 \" , 5, 8, 9]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1065 ERRORS
+= test_failure ( cmd
, "Object '[ \" 1.0 \" , 5, 8, 9]' must be a JSON array with 2 elements" )
1067 cmd
= ( CFSD_PREFIX
+ "'[1, 2]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1068 ERRORS
+= test_failure ( cmd
, "Object '[1, 2]' must be a JSON array with the first element a string" )
1070 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.3 \" ,{{ \" snapid \" : \" not an int \" }}]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1071 ERRORS
+= test_failure ( cmd
, "Decode object JSON error: value type is 2 not 4" )
1073 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= pid
)
1074 ALLPGS
= OBJREPPGS
+ OBJECPGS
1075 OSDS
= get_osds ( ALLPGS
[ 0 ], OSDDIR
)
1078 print ( "Test all --op dump-journal" )
1079 ALLOSDS
= [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]
1080 ERRORS
+= test_dump_journal ( CFSD_PREFIX
, ALLOSDS
)
1082 # Test --op list and generate json for all objects
1083 print ( "Test --op list variants" )
1085 # retrieve all objects from all PGs
1086 tmpfd
= open ( TMPFILE
, "wb" )
1087 cmd
= ( CFSD_PREFIX
+ "--op list --format json" ). format ( osd
= osd
)
1089 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1091 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1094 lines
= get_lines ( TMPFILE
)
1095 JSONOBJ
= sorted ( set ( lines
))
1096 ( pgid
, coll
, jsondict
) = json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1098 # retrieve all objects in a given PG
1099 tmpfd
= open ( OTHERFILE
, "ab" )
1100 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} --format json" ). format ( osd
= osd
, pg
= pgid
)
1102 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1104 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1107 lines
= get_lines ( OTHERFILE
)
1108 JSONOBJ
= sorted ( set ( lines
))
1109 ( other_pgid
, other_coll
, other_jsondict
) = json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1111 if pgid
!= other_pgid
or jsondict
!= other_jsondict
or coll
!= other_coll
:
1112 logging
. error ( "the first line of --op list is different "
1113 "from the first line of --op list --pgid {pg} " . format ( pg
= pgid
))
1116 # retrieve all objects with a given name in a given PG
1117 tmpfd
= open ( OTHERFILE
, "wb" )
1118 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} {object} --format json" ). format ( osd
= osd
, pg
= pgid
, object = jsondict
[ 'oid' ])
1120 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1122 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1125 lines
= get_lines ( OTHERFILE
)
1126 JSONOBJ
= sorted ( set ( lines
))
1127 ( other_pgid
, other_coll
, other_jsondict
) in json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1129 if pgid
!= other_pgid
or jsondict
!= other_jsondict
or coll
!= other_coll
:
1130 logging
. error ( "the first line of --op list is different "
1131 "from the first line of --op list --pgid {pg} {object} " . format ( pg
= pgid
, object = jsondict
[ 'oid' ]))
1134 print ( "Test --op list by generating json for all objects using default format" )
1136 OSDS
= get_osds ( pg
, OSDDIR
)
1138 tmpfd
= open ( TMPFILE
, "ab" )
1139 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} " ). format ( osd
= osd
, pg
= pg
)
1141 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1143 logging
. error ( "Bad exit status {ret} from --op list request" . format ( ret
= ret
))
1147 lines
= get_lines ( TMPFILE
)
1148 JSONOBJ
= sorted ( set ( lines
))
1149 for JSON
in JSONOBJ
:
1150 ( pgid
, jsondict
) = json
. loads ( JSON
)
1151 # Skip clones for now
1152 if jsondict
[ 'snapid' ] != - 2 :
1154 db
[ jsondict
[ 'namespace' ]][ jsondict
[ 'oid' ]][ 'json' ] = json
. dumps (( pgid
, jsondict
))
1155 # print db[jsondict['namespace']][jsondict['oid']]['json']
1156 if jsondict
[ 'oid' ]. find ( EC_NAME
) == 0 and 'shard_id' not in jsondict
:
1157 logging
. error ( "Malformed JSON {json} " . format ( json
= JSON
))
1161 print ( "Test get-bytes and set-bytes" )
1162 for nspace
in db
. keys ():
1163 for basename
in db
[ nspace
]. keys ():
1164 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1165 JSON
= db
[ nspace
][ basename
][ 'json' ]
1166 GETNAME
= "/tmp/getbytes. {pid} " . format ( pid
= pid
)
1167 TESTNAME
= "/tmp/testbytes. {pid} " . format ( pid
= pid
)
1168 SETNAME
= "/tmp/setbytes. {pid} " . format ( pid
= pid
)
1169 BADNAME
= "/tmp/badbytes. {pid} " . format ( pid
= pid
)
1170 for pg
in OBJREPPGS
:
1171 OSDS
= get_osds ( pg
, OSDDIR
)
1173 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1174 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1175 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1182 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-bytes {fname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, fname
= GETNAME
)
1184 ret
= call ( cmd
, shell
= True )
1186 logging
. error ( "Bad exit status {ret} " . format ( ret
= ret
))
1189 cmd
= "diff -q {file} {getfile} " . format ( file = file , getfile
= GETNAME
)
1190 ret
= call ( cmd
, shell
= True )
1192 logging
. error ( "Data from get-bytes differ" )
1193 logging
. debug ( "Got:" )
1194 cat_file ( logging
. DEBUG
, GETNAME
)
1195 logging
. debug ( "Expected:" )
1196 cat_file ( logging
. DEBUG
, file )
1198 fd
= open ( SETNAME
, "w" )
1199 data
= "put-bytes going into {file} \n " . format ( file = file )
1202 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' set-bytes {sname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, sname
= SETNAME
)
1204 ret
= call ( cmd
, shell
= True )
1206 logging
. error ( "Bad exit status {ret} from set-bytes" . format ( ret
= ret
))
1208 fd
= open ( TESTNAME
, "wb" )
1209 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-bytes -" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1211 ret
= call ( cmd
, shell
= True , stdout
= fd
)
1214 logging
. error ( "Bad exit status {ret} from get-bytes" . format ( ret
= ret
))
1216 cmd
= "diff -q {setfile} {testfile} " . format ( setfile
= SETNAME
, testfile
= TESTNAME
)
1218 ret
= call ( cmd
, shell
= True )
1220 logging
. error ( "Data after set-bytes differ" )
1221 logging
. debug ( "Got:" )
1222 cat_file ( logging
. DEBUG
, TESTNAME
)
1223 logging
. debug ( "Expected:" )
1224 cat_file ( logging
. DEBUG
, SETNAME
)
1227 # Use set-bytes with --dry-run and make sure contents haven't changed
1228 fd
= open ( BADNAME
, "w" )
1229 data
= "Bad data for --dry-run in {file} \n " . format ( file = file )
1232 cmd
= ( CFSD_PREFIX
+ "--dry-run --pgid {pg} ' {json} ' set-bytes {sname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, sname
= BADNAME
)
1234 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1236 logging
. error ( "Bad exit status {ret} from set-bytes --dry-run" . format ( ret
= ret
))
1238 fd
= open ( TESTNAME
, "wb" )
1239 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-bytes -" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1241 ret
= call ( cmd
, shell
= True , stdout
= fd
)
1244 logging
. error ( "Bad exit status {ret} from get-bytes" . format ( ret
= ret
))
1246 cmd
= "diff -q {setfile} {testfile} " . format ( setfile
= SETNAME
, testfile
= TESTNAME
)
1248 ret
= call ( cmd
, shell
= True )
1250 logging
. error ( "Data after set-bytes --dry-run changed!" )
1251 logging
. debug ( "Got:" )
1252 cat_file ( logging
. DEBUG
, TESTNAME
)
1253 logging
. debug ( "Expected:" )
1254 cat_file ( logging
. DEBUG
, SETNAME
)
1257 fd
= open ( file , "rb" )
1258 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' set-bytes" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1260 ret
= call ( cmd
, shell
= True , stdin
= fd
)
1262 logging
. error ( "Bad exit status {ret} from set-bytes to restore object" . format ( ret
= ret
))
1283 # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
1284 print ( "Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap" )
1285 for nspace
in db
. keys ():
1286 for basename
in db
[ nspace
]. keys ():
1287 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1288 JSON
= db
[ nspace
][ basename
][ 'json' ]
1289 for pg
in OBJREPPGS
:
1290 OSDS
= get_osds ( pg
, OSDDIR
)
1292 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1293 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1294 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1297 for key
, val
in db
[ nspace
][ basename
][ "xattr" ]. items ():
1299 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-attr {key} " ). format ( osd
= osd
, json
= JSON
, key
= attrkey
)
1301 getval
= check_output ( cmd
, shell
= True )
1303 logging
. error ( "get-attr of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= attrkey
, get
= getval
, orig
= val
))
1306 # set-attr to bogus value "foobar"
1307 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1309 ret
= call ( cmd
, shell
= True )
1311 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1314 # Test set-attr with dry-run
1315 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1317 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1319 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1322 # Check the set-attr
1323 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1325 getval
= check_output ( cmd
, shell
= True )
1327 logging
. error ( "Bad exit status {ret} from get-attr" . format ( ret
= ret
))
1330 if getval
!= "foobar" :
1331 logging
. error ( "Check of set-attr failed because we got {val} " . format ( val
= getval
))
1335 cmd
= ( CFSD_PREFIX
+ "' {json} ' rm-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1337 ret
= call ( cmd
, shell
= True )
1339 logging
. error ( "Bad exit status {ret} from rm-attr" . format ( ret
= ret
))
1342 # Check rm-attr with dry-run
1343 cmd
= ( CFSD_PREFIX
+ "--dry-run ' {json} ' rm-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1345 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1347 logging
. error ( "Bad exit status {ret} from rm-attr" . format ( ret
= ret
))
1350 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1352 ret
= call ( cmd
, shell
= True , stderr
= nullfd
, stdout
= nullfd
)
1354 logging
. error ( "For rm-attr expect get-attr to fail, but it succeeded" )
1357 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
, val
= val
)
1359 ret
= call ( cmd
, shell
= True )
1361 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1365 hdr
= db
[ nspace
][ basename
]. get ( "omapheader" , "" )
1366 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omaphdr" ). format ( osd
= osd
, json
= JSON
)
1368 gethdr
= check_output ( cmd
, shell
= True )
1370 logging
. error ( "get-omaphdr was wrong: {get} instead of {orig} " . format ( get
= gethdr
, orig
= hdr
))
1373 # set-omaphdr to bogus value "foobar"
1374 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ "' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1376 ret
= call ( cmd
, shell
= True )
1378 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1381 # Check the set-omaphdr
1382 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1384 gethdr
= check_output ( cmd
, shell
= True )
1386 logging
. error ( "Bad exit status {ret} from get-omaphdr" . format ( ret
= ret
))
1389 if gethdr
!= "foobar" :
1390 logging
. error ( "Check of set-omaphdr failed because we got {val} " . format ( val
= getval
))
1393 # Test dry-run with set-omaphdr
1394 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run ' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1396 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1398 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1402 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ "' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
, val
= hdr
)
1404 ret
= call ( cmd
, shell
= True )
1406 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1410 for omapkey
, val
in db
[ nspace
][ basename
][ "omap" ]. items ():
1411 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-omap {key} " ). format ( osd
= osd
, json
= JSON
, key
= omapkey
)
1413 getval
= check_output ( cmd
, shell
= True )
1415 logging
. error ( "get-omap of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= omapkey
, get
= getval
, orig
= val
))
1418 # set-omap to bogus value "foobar"
1419 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1421 ret
= call ( cmd
, shell
= True )
1423 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1426 # Check set-omap with dry-run
1427 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1429 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1431 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1434 # Check the set-omap
1435 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1437 getval
= check_output ( cmd
, shell
= True )
1439 logging
. error ( "Bad exit status {ret} from get-omap" . format ( ret
= ret
))
1442 if getval
!= "foobar" :
1443 logging
. error ( "Check of set-omap failed because we got {val} " . format ( val
= getval
))
1447 cmd
= ( CFSD_PREFIX
+ "' {json} ' rm-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1449 ret
= call ( cmd
, shell
= True )
1451 logging
. error ( "Bad exit status {ret} from rm-omap" . format ( ret
= ret
))
1453 # Check rm-omap with dry-run
1454 cmd
= ( CFSD_PREFIX
+ "--dry-run ' {json} ' rm-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1456 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1458 logging
. error ( "Bad exit status {ret} from rm-omap" . format ( ret
= ret
))
1460 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1462 ret
= call ( cmd
, shell
= True , stderr
= nullfd
, stdout
= nullfd
)
1464 logging
. error ( "For rm-omap expect get-omap to fail, but it succeeded" )
1467 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
, val
= val
)
1469 ret
= call ( cmd
, shell
= True )
1471 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1477 for nspace
in db
. keys ():
1478 for basename
in db
[ nspace
]. keys ():
1479 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1480 JSON
= db
[ nspace
][ basename
][ 'json' ]
1481 jsondict
= json
. loads ( JSON
)
1482 for pg
in OBJREPPGS
:
1483 OSDS
= get_osds ( pg
, OSDDIR
)
1485 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1486 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1487 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1490 if int ( basename
. split ( REP_NAME
)[ 1 ]) > int ( NUM_CLONED_REP_OBJECTS
):
1492 logging
. debug ( "REPobject " + JSON
)
1493 cmd
= ( CFSD_PREFIX
+ " ' {json} ' dump | grep ' \" snap \" : 1,' > /dev/null" ). format ( osd
= osd
, json
= JSON
)
1495 ret
= call ( cmd
, shell
= True )
1497 logging
. error ( "Invalid dump for {json} " . format ( json
= JSON
))
1499 if 'shard_id' in jsondict
[ 1 ]:
1500 logging
. debug ( "ECobject " + JSON
)
1502 OSDS
= get_osds ( pg
, OSDDIR
)
1503 jsondict
= json
. loads ( JSON
)
1505 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1506 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1507 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1510 if int ( basename
. split ( EC_NAME
)[ 1 ]) > int ( NUM_EC_OBJECTS
):
1512 # Fix shard_id since we only have one json instance for each object
1513 jsondict
[ 1 ][ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1514 cmd
= ( CFSD_PREFIX
+ " ' {json} ' dump | grep ' \" hinfo \" : [{{]' > /dev/null" ). format ( osd
= osd
, json
= json
. dumps (( pg
, jsondict
[ 1 ])))
1516 ret
= call ( cmd
, shell
= True )
1518 logging
. error ( "Invalid dump for {json} " . format ( json
= JSON
))
1520 print ( "Test list-attrs get-attr" )
1521 ATTRFILE
= r
"/tmp/attrs. {pid} " . format ( pid
= pid
)
1522 VALFILE
= r
"/tmp/val. {pid} " . format ( pid
= pid
)
1523 for nspace
in db
. keys ():
1524 for basename
in db
[ nspace
]. keys ():
1525 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
)
1526 JSON
= db
[ nspace
][ basename
][ 'json' ]
1527 jsondict
= json
. loads ( JSON
)
1529 if 'shard_id' in jsondict
[ 1 ]:
1530 logging
. debug ( "ECobject " + JSON
)
1533 OSDS
= get_osds ( pg
, OSDDIR
)
1534 # Fix shard_id since we only have one json instance for each object
1535 jsondict
[ 1 ][ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1536 JSON
= json
. dumps (( pg
, jsondict
[ 1 ]))
1538 cmd
= ( CFSD_PREFIX
+ " --tty ' {json} ' get-attr hinfo_key" ). format ( osd
= osd
, json
= JSON
)
1539 logging
. debug ( "TRY: " + cmd
)
1541 out
= check_output ( cmd
, shell
= True , stderr
= subprocess
. STDOUT
)
1542 logging
. debug ( "FOUND: {json} in {osd} has value ' {val} '" . format ( osd
= osd
, json
= JSON
, val
= out
))
1544 except subprocess
. CalledProcessError
as e
:
1545 logging
. debug ( "Error message: {output} " . format ( output
= e
. output
))
1546 if "No such file or directory" not in str ( e
. output
) and \
1547 "No data available" not in str ( e
. output
) and \
1548 "not contained by pg" not in str ( e
. output
):
1550 # Assuming k=2 m=1 for the default ec pool
1552 logging
. error ( " {json} hinfo_key found {found} times instead of 3" . format ( json
= JSON
, found
= found
))
1556 # Make sure rep obj with rep pg or ec obj with ec pg
1557 if ( 'shard_id' in jsondict
[ 1 ]) != ( pg
. find ( 's' ) > 0 ):
1559 if 'shard_id' in jsondict
[ 1 ]:
1560 # Fix shard_id since we only have one json instance for each object
1561 jsondict
[ 1 ][ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1562 JSON
= json
. dumps (( pg
, jsondict
[ 1 ]))
1563 OSDS
= get_osds ( pg
, OSDDIR
)
1565 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1566 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1567 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1570 afd
= open ( ATTRFILE
, "wb" )
1571 cmd
= ( CFSD_PREFIX
+ " ' {json} ' list-attrs" ). format ( osd
= osd
, json
= JSON
)
1573 ret
= call ( cmd
, shell
= True , stdout
= afd
)
1576 logging
. error ( "list-attrs failed with {ret} " . format ( ret
= ret
))
1579 keys
= get_lines ( ATTRFILE
)
1580 values
= dict ( db
[ nspace
][ basename
][ "xattr" ])
1582 if key
== "_" or key
== "snapset" or key
== "hinfo_key" :
1584 key
= key
. strip ( "_" )
1585 if key
not in values
:
1586 logging
. error ( "Unexpected key {key} present" . format ( key
= key
))
1589 exp
= values
. pop ( key
)
1590 vfd
= open ( VALFILE
, "wb" )
1591 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-attr {key} " ). format ( osd
= osd
, json
= JSON
, key
= "_" + key
)
1593 ret
= call ( cmd
, shell
= True , stdout
= vfd
)
1596 logging
. error ( "get-attr failed with {ret} " . format ( ret
= ret
))
1599 lines
= get_lines ( VALFILE
)
1602 logging
. error ( "For key {key} got value {got} instead of {expected} " . format ( key
= key
, got
= val
, expected
= exp
))
1604 if len ( values
) != 0 :
1605 logging
. error ( "Not all keys found, remaining keys:" )
1608 print ( "Test --op meta-list" )
1609 tmpfd
= open ( TMPFILE
, "wb" )
1610 cmd
= ( CFSD_PREFIX
+ "--op meta-list" ). format ( osd
= ONEOSD
)
1612 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1614 logging
. error ( "Bad exit status {ret} from --op meta-list request" . format ( ret
= ret
))
1617 print ( "Test get-bytes on meta" )
1619 lines
= get_lines ( TMPFILE
)
1620 JSONOBJ
= sorted ( set ( lines
))
1621 for JSON
in JSONOBJ
:
1622 ( pgid
, jsondict
) = json
. loads ( JSON
)
1624 logging
. error ( "pgid incorrect for --op meta-list {pgid} " . format ( pgid
= pgid
))
1626 if jsondict
[ 'namespace' ] != "" :
1627 logging
. error ( "namespace non null --op meta-list {ns} " . format ( ns
= jsondict
[ 'namespace' ]))
1634 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-bytes {fname} " ). format ( osd
= ONEOSD
, json
= JSON
, fname
= GETNAME
)
1636 ret
= call ( cmd
, shell
= True )
1638 logging
. error ( "Bad exit status {ret} " . format ( ret
= ret
))
1650 print ( "Test pg info" )
1651 for pg
in ALLREPPGS
+ ALLECPGS
:
1652 for osd
in get_osds ( pg
, OSDDIR
):
1653 cmd
= ( CFSD_PREFIX
+ "--op info --pgid {pg} | grep ' \" pgid \" : \" {pg} \" '" ). format ( osd
= osd
, pg
= pg
)
1655 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1657 logging
. error ( "Getting info failed for pg {pg} from {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1660 print ( "Test pg logging" )
1661 if len ( ALLREPPGS
+ ALLECPGS
) == len ( OBJREPPGS
+ OBJECPGS
):
1662 logging
. warning ( "All PGs have objects, so no log without modify entries" )
1663 for pg
in ALLREPPGS
+ ALLECPGS
:
1664 for osd
in get_osds ( pg
, OSDDIR
):
1665 tmpfd
= open ( TMPFILE
, "wb" )
1666 cmd
= ( CFSD_PREFIX
+ "--op log --pgid {pg} " ). format ( osd
= osd
, pg
= pg
)
1668 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1670 logging
. error ( "Getting log failed for pg {pg} from {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1672 HASOBJ
= pg
in OBJREPPGS
+ OBJECPGS
1674 for line
in get_lines ( TMPFILE
):
1675 if line
. find ( "modify" ) != - 1 :
1678 if HASOBJ
!= MODOBJ
:
1679 logging
. error ( "Bad log for pg {pg} from {osd} " . format ( pg
= pg
, osd
= osd
))
1680 MSG
= ( HASOBJ
and [ "" ] or [ "NOT " ])[ 0 ]
1681 print ( "Log should {msg} have a modify entry" . format ( msg
= MSG
))
1689 print ( "Test list-pgs" )
1690 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1692 CHECK_PGS
= get_osd_pgs ( os
. path
. join ( OSDDIR
, osd
), None )
1693 CHECK_PGS
= sorted ( CHECK_PGS
)
1695 cmd
= ( CFSD_PREFIX
+ "--op list-pgs" ). format ( osd
= osd
)
1697 TEST_PGS
= check_output ( cmd
, shell
= True ). split ( " \n " )
1698 TEST_PGS
= sorted ( TEST_PGS
)[ 1 :] # Skip extra blank line
1700 if TEST_PGS
!= CHECK_PGS
:
1701 logging
. error ( "list-pgs got wrong result for osd. {osd} " . format ( osd
= osd
))
1702 logging
. error ( "Expected {pgs} " . format ( pgs
= CHECK_PGS
))
1703 logging
. error ( "Got {pgs} " . format ( pgs
= TEST_PGS
))
1707 print ( "Test pg export --dry-run" )
1709 osd
= get_osds ( pg
, OSDDIR
)[ 0 ]
1710 fname
= "/tmp/fname. {pid} " . format ( pid
= pid
)
1711 cmd
= ( CFSD_PREFIX
+ "--dry-run --op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1713 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1715 logging
. error ( "Exporting --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1717 elif os
. path
. exists ( fname
):
1718 logging
. error ( "Exporting --dry-run created file" )
1721 cmd
= ( CFSD_PREFIX
+ "--dry-run --op export --pgid {pg} > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1723 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1725 logging
. error ( "Exporting --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1728 outdata
= get_lines ( fname
)
1729 if len ( outdata
) > 0 :
1730 logging
. error ( "Exporting --dry-run to stdout not empty" )
1731 logging
. error ( "Data: " + outdata
)
1735 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1736 os
. mkdir ( os
. path
. join ( TESTDIR
, osd
))
1737 print ( "Test pg export" )
1738 for pg
in ALLREPPGS
+ ALLECPGS
:
1739 for osd
in get_osds ( pg
, OSDDIR
):
1740 mydir
= os
. path
. join ( TESTDIR
, osd
)
1741 fname
= os
. path
. join ( mydir
, pg
)
1742 if pg
== ALLREPPGS
[ 0 ]:
1743 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1744 elif pg
== ALLREPPGS
[ 1 ]:
1745 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file - > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1747 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1749 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1751 logging
. error ( "Exporting failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1754 ERRORS
+= EXP_ERRORS
1756 print ( "Test clear-data-digest" )
1757 for nspace
in db
. keys ():
1758 for basename
in db
[ nspace
]. keys ():
1759 JSON
= db
[ nspace
][ basename
][ 'json' ]
1760 cmd
= ( CFSD_PREFIX
+ "' {json} ' clear-data-digest" ). format ( osd
= 'osd0' , json
= JSON
)
1762 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1764 logging
. error ( "Clearing data digest failed for {json} " . format ( json
= JSON
))
1767 cmd
= ( CFSD_PREFIX
+ "' {json} ' dump | grep ' \" data_digest \" : \" 0xff'" ). format ( osd
= 'osd0' , json
= JSON
)
1769 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1771 logging
. error ( "Data digest not cleared for {json} " . format ( json
= JSON
))
1777 print ( "Test pg removal" )
1779 for pg
in ALLREPPGS
+ ALLECPGS
:
1780 for osd
in get_osds ( pg
, OSDDIR
):
1781 # This should do nothing
1782 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid {pg} --dry-run" ). format ( pg
= pg
, osd
= osd
)
1784 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1786 logging
. error ( "Removing --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1788 cmd
= ( CFSD_PREFIX
+ "--force --op remove --pgid {pg} " ). format ( pg
= pg
, osd
= osd
)
1790 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1792 logging
. error ( "Removing failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1798 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 :
1799 print ( "Test pg import" )
1800 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1801 dir = os
. path
. join ( TESTDIR
, osd
)
1802 PGS
= [ f
for f
in os
. listdir ( dir ) if os
. path
. isfile ( os
. path
. join ( dir , f
))]
1804 file = os
. path
. join ( dir , pg
)
1805 # Make sure this doesn't crash
1806 cmd
= ( CFSD_PREFIX
+ "--op dump-export --file {file} " ). format ( osd
= osd
, file = file )
1808 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1810 logging
. error ( "Dump-export failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1812 # This should do nothing
1813 cmd
= ( CFSD_PREFIX
+ "--op import --file {file} --dry-run" ). format ( osd
= osd
, file = file )
1815 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1817 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1820 cmd
= ( "cat {file} |" . format ( file = file ) + CFSD_PREFIX
+ "--op import" ). format ( osd
= osd
)
1822 cmd
= ( CFSD_PREFIX
+ "--op import --file - --pgid {pg} < {file} " ). format ( osd
= osd
, file = file , pg
= pg
)
1824 cmd
= ( CFSD_PREFIX
+ "--op import --file {file} " ). format ( osd
= osd
, file = file )
1826 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1828 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1831 logging
. warning ( "SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES" )
1833 ERRORS
+= IMP_ERRORS
1836 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 and IMP_ERRORS
== 0 :
1837 print ( "Verify replicated import data" )
1838 data_errors
, _
= check_data ( DATADIR
, TMPFILE
, OSDDIR
, REP_NAME
)
1839 ERRORS
+= data_errors
1841 logging
. warning ( "SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES" )
1843 print ( "Test all --op dump-journal again" )
1844 ALLOSDS
= [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]
1845 ERRORS
+= test_dump_journal ( CFSD_PREFIX
, ALLOSDS
)
1850 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 and IMP_ERRORS
== 0 :
1851 print ( "Verify erasure coded import data" )
1852 ERRORS
+= verify ( DATADIR
, EC_POOL
, EC_NAME
, db
)
1853 # Check replicated data/xattr/omap using rados
1854 print ( "Verify replicated import data using rados" )
1855 ERRORS
+= verify ( DATADIR
, REP_POOL
, REP_NAME
, db
)
1858 NEWPOOL
= "rados-import-pool"
1859 cmd
= " {path} /ceph osd pool create {pool} 8" . format ( pool
= NEWPOOL
, path
= CEPH_BIN
)
1861 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1863 print ( "Test rados import" )
1865 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1866 dir = os
. path
. join ( TESTDIR
, osd
)
1867 for pg
in [ f
for f
in os
. listdir ( dir ) if os
. path
. isfile ( os
. path
. join ( dir , f
))]:
1868 if pg
. find ( " {id} ." . format ( id = REPID
)) != 0 :
1870 file = os
. path
. join ( dir , pg
)
1873 # This should do nothing
1874 cmd
= " {path} /rados import -p {pool} --dry-run {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1876 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1878 logging
. error ( "Rados import --dry-run failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1880 cmd
= " {path} /rados -p {pool} ls" . format ( pool
= NEWPOOL
, path
= CEPH_BIN
)
1882 data
= check_output ( cmd
, shell
= True )
1884 logging
. error ( "' {data} '" . format ( data
= data
))
1885 logging
. error ( "Found objects after dry-run" )
1887 cmd
= " {path} /rados import -p {pool} {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1889 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1891 logging
. error ( "Rados import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1893 cmd
= " {path} /rados import -p {pool} --no-overwrite {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1895 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1897 logging
. error ( "Rados import --no-overwrite failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1900 ERRORS
+= verify ( DATADIR
, NEWPOOL
, REP_NAME
, db
)
1902 logging
. warning ( "SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES" )
1904 # Clear directories of previous portion
1905 call ( "/bin/rm -rf {dir} " . format ( dir = TESTDIR
), shell
= True )
1906 call ( "/bin/rm -rf {dir} " . format ( dir = DATADIR
), shell
= True )
1910 # Cause SPLIT_POOL to split and test import with object/log filtering
1911 print ( "Testing import all objects after a split" )
1912 SPLIT_POOL
= "split_pool"
1915 SPLIT_NSPACE_COUNT
= 2
1916 SPLIT_NAME
= "split"
1917 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} replicated" . format ( pool
= SPLIT_POOL
, pg
= PG_COUNT
, path
= CEPH_BIN
)
1919 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1920 SPLITID
= get_pool_id ( SPLIT_POOL
, nullfd
)
1921 pool_size
= int ( check_output ( " {path} /ceph osd pool get {pool} size" . format ( pool
= SPLIT_POOL
, path
= CEPH_BIN
), shell
= True , stderr
= nullfd
). split ( " " )[ 1 ])
1926 objects
= range ( 1 , SPLIT_OBJ_COUNT
+ 1 )
1927 nspaces
= range ( SPLIT_NSPACE_COUNT
)
1929 nspace
= get_nspace ( n
)
1932 NAME
= SPLIT_NAME
+ " {num} " . format ( num
= i
)
1933 LNAME
= nspace
+ "-" + NAME
1934 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
1937 cmd
= "rm -f " + DDNAME
1939 call ( cmd
, shell
= True )
1942 dataline
= range ( DATALINECOUNT
)
1945 fd
= open ( DDNAME
, "w" )
1946 data
= "This is the split data for " + LNAME
+ " \n "
1951 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= SPLIT_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
1953 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
1955 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
1961 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1962 os
. mkdir ( os
. path
. join ( TESTDIR
, osd
))
1964 pg
= " {pool} .0" . format ( pool
= SPLITID
)
1967 export_osds
= get_osds ( pg
, OSDDIR
)
1968 for osd
in export_osds
:
1969 mydir
= os
. path
. join ( TESTDIR
, osd
)
1970 fname
= os
. path
. join ( mydir
, pg
)
1971 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1973 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1975 logging
. error ( "Exporting failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1978 ERRORS
+= EXP_ERRORS
1984 cmd
= " {path} /ceph osd pool set {pool} pg_num 2" . format ( pool
= SPLIT_POOL
, path
= CEPH_BIN
)
1986 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1992 # Now 2 PGs, poolid.0 and poolid.1
1993 # make note of pgs before we remove the pgs...
1994 osds
= get_osds ( " {pool} .0" . format ( pool
= SPLITID
), OSDDIR
);
1995 for seed
in range ( 2 ):
1996 pg
= " {pool} . {seed} " . format ( pool
= SPLITID
, seed
= seed
)
1999 cmd
= ( CFSD_PREFIX
+ "--force --op remove --pgid {pg} " ). format ( pg
= pg
, osd
= osd
)
2001 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
2005 # This is weird. The export files are based on only the EXPORT_PG
2006 # and where that pg was before the split. Use 'which' to use all
2007 # export copies in import.
2008 mydir
= os
. path
. join ( TESTDIR
, export_osds
[ which
])
2009 fname
= os
. path
. join ( mydir
, EXPORT_PG
)
2011 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= EXPORT_PG
, file = fname
)
2013 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
2015 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
2018 ERRORS
+= IMP_ERRORS
2020 # Start up again to make sure imports didn't corrupt anything
2022 print ( "Verify split import data" )
2023 data_errors
, count
= check_data ( DATADIR
, TMPFILE
, OSDDIR
, SPLIT_NAME
)
2024 ERRORS
+= data_errors
2025 if count
!= ( SPLIT_OBJ_COUNT
* SPLIT_NSPACE_COUNT
* pool_size
):
2026 logging
. error ( "Incorrect number of replicas seen {count} " . format ( count
= count
))
2031 call ( "/bin/rm -rf {dir} " . format ( dir = TESTDIR
), shell
= True )
2032 call ( "/bin/rm -rf {dir} " . format ( dir = DATADIR
), shell
= True )
2034 ERRORS
+= test_removeall ( CFSD_PREFIX
, db
, OBJREPPGS
, REP_POOL
, CEPH_BIN
, OSDDIR
, REP_NAME
, NUM_CLONED_REP_OBJECTS
)
2036 # vstart() starts 4 OSDs
2037 ERRORS
+= test_get_set_osdmap ( CFSD_PREFIX
, list ( range ( 4 )), ALLOSDS
)
2038 ERRORS
+= test_get_set_inc_osdmap ( CFSD_PREFIX
, ALLOSDS
[ 0 ])
2041 CORES
= [ f
for f
in os
. listdir ( CEPH_DIR
) if f
. startswith ( "core." )]
2043 CORE_DIR
= os
. path
. join ( "/tmp" , "cores. {pid} " . format ( pid
= os
. getpid ()))
2045 call ( "/bin/mv {ceph_dir} /core.* {core_dir} " . format ( ceph_dir
= CEPH_DIR
, core_dir
= CORE_DIR
), shell
= True )
2046 logging
. error ( "Failure due to cores found" )
2047 logging
. error ( "See {core_dir} for cores" . format ( core_dir
= CORE_DIR
))
2048 ERRORS
+= len ( CORES
)
2051 print ( "TEST PASSED" )
2054 print ( "TEST FAILED WITH {errcount} ERRORS" . format ( errcount
= ERRORS
))
def remove_btrfs_subvolumes(path):
    """Delete every btrfs subvolume living under *path* so the directory
    tree can later be removed with a plain ``rm -rf``.

    No-op on FreeBSD (no btrfs there) or when *path* is not on a btrfs
    filesystem.  Relies on passwordless ``sudo`` for the btrfs commands.
    """
    if platform.system() == "FreeBSD":
        return
    # Ask stat(1) for the filesystem type of path.  '%%T' escapes the
    # percent for Python's % formatting, so the shell sees '%T'.
    result = subprocess.Popen("stat -f -c '%%T' %s" % path,
                              shell=True, stdout=subprocess.PIPE)
    # Default to empty so an unexpected lack of output from stat does not
    # raise NameError below; the last (normally only) line wins.
    filesystem = ""
    for line in result.stdout:
        filesystem = decode(line).rstrip('\n')
    # Close the pipe and reap the child so we don't leak fds / zombies.
    result.stdout.close()
    result.wait()
    if filesystem == "btrfs":
        result = subprocess.Popen("sudo btrfs subvolume list %s" % path,
                                  shell=True, stdout=subprocess.PIPE)
        for line in result.stdout:
            # The 9th whitespace-separated field of 'btrfs subvolume list'
            # output is the subvolume path.
            subvolume = decode(line).split()[8]
            # Extract the portion of the subvolume path starting at *path*
            # (the listing is relative to the filesystem root).
            m = re.search(".*(%s.*)" % path, subvolume)
            if m:
                found = m.group(1)
                call("sudo btrfs subvolume delete %s" % found, shell=True)
        result.stdout.close()
        result.wait()
2075 if __name__
== "__main__" :
2078 status
= main ( sys
. argv
[ 1 :])
2081 os
. chdir ( CEPH_BUILD_DIR
)
2082 remove_btrfs_subvolumes ( CEPH_DIR
)
2083 call ( "/bin/rm -fr {dir} " . format ( dir = CEPH_DIR
), shell
= True )