]>
git.proxmox.com Git - ceph.git/blob - ceph/src/test/ceph_objectstore_tool.py
3 from __future__
import print_function
4 from subprocess
import call
6 from subprocess
import check_output
8 def check_output (* popenargs
, ** kwargs
):
10 # backported from python 2.7 stdlib
11 process
= subprocess
. Popen (
12 stdout
= subprocess
. PIPE
, * popenargs
, ** kwargs
)
13 output
, unused_err
= process
. communicate ()
14 retcode
= process
. poll ()
16 cmd
= kwargs
. get ( "args" )
19 error
= subprocess
. CalledProcessError ( retcode
, cmd
)
37 from subprocess
import DEVNULL
39 DEVNULL
= open ( os
. devnull
, "wb" )
41 logging
. basicConfig ( format
= ' %(levelname)s : %(message)s ' , level
= logging
. WARNING
)
44 if sys
. version_info
[ 0 ] >= 3 :
46 return s
. decode ( 'utf-8' )
48 def check_output (* args
, ** kwargs
):
49 return decode ( subprocess
. check_output (* args
, ** kwargs
))
56 def wait_for_health ():
57 print ( "Wait for health_ok..." , end
= "" )
59 while call ( " {path} /ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null" . format ( path
= CEPH_BIN
), shell
= True ) == 0 :
62 raise Exception ( "Time exceeded to go to health" )
67 def get_pool_id ( name
, nullfd
):
68 cmd
= " {path} /ceph osd pool stats {pool} " . format ( pool
= name
, path
= CEPH_BIN
). split ()
69 # pool {pool} id # .... grab the 4 field
70 return check_output ( cmd
, stderr
= nullfd
). split ()[ 3 ]
73 # return a list of unique PGS given an osd subdirectory
74 def get_osd_pgs ( SUBDIR
, ID
):
77 endhead
= re
. compile ( " {id} .*_head$" . format ( id = ID
))
78 DIR
= os
. path
. join ( SUBDIR
, "current" )
79 PGS
+= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and ( ID
is None or endhead
. match ( f
))]
80 PGS
= [ re
. sub ( "_head" , "" , p
) for p
in PGS
if "_head" in p
]
84 # return a sorted list of unique PGs given a directory
86 OSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
89 SUBDIR
= os
. path
. join ( DIR
, d
)
90 PGS
+= get_osd_pgs ( SUBDIR
, ID
)
91 return sorted ( set ( PGS
))
94 # return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified
95 def get_objs ( ALLPGS
, prefix
, DIR
, ID
):
96 OSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
99 DIRL2
= os
. path
. join ( DIR
, d
)
100 SUBDIR
= os
. path
. join ( DIRL2
, "current" )
103 if not os
. path
. isdir ( os
. path
. join ( SUBDIR
, PGDIR
)):
105 FINALDIR
= os
. path
. join ( SUBDIR
, PGDIR
)
106 # See if there are any objects there
107 if any ( f
for f
in [ val
for _
, _
, fl
in os
. walk ( FINALDIR
) for val
in fl
] if f
. startswith ( prefix
)):
109 return sorted ( set ( PGS
))
112 # return a sorted list of OSDS which have data from a given PG
113 def get_osds ( PG
, DIR
):
114 ALLOSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
117 DIRL2
= os
. path
. join ( DIR
, d
)
118 SUBDIR
= os
. path
. join ( DIRL2
, "current" )
120 if not os
. path
. isdir ( os
. path
. join ( SUBDIR
, PGDIR
)):
126 def get_lines ( filename
):
127 tmpfd
= open ( filename
, "r" )
131 line
= tmpfd
. readline (). rstrip ( ' \n ' )
139 def cat_file ( level
, filename
):
140 if level
< logging
. getLogger (). getEffectiveLevel ():
142 print ( "File: " + filename
)
143 with
open ( filename
, "r" ) as f
:
145 line
= f
. readline (). rstrip ( ' \n ' )
152 def vstart ( new
, opt
= "" ):
153 print ( "vstarting...." , end
= "" )
154 NEW
= new
and "-n" or "-N"
155 call ( "MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 {path} /src/vstart.sh --short -l {new} -d {opt} > /dev/null 2>&1" . format ( new
= NEW
, opt
= opt
, path
= CEPH_ROOT
), shell
= True )
159 def test_failure ( cmd
, errmsg
, tty
= False ):
162 ttyfd
= open ( "/dev/tty" , "rwb" )
163 except Exception as e
:
165 logging
. info ( "SKIP " + cmd
)
167 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
168 tmpfd
= open ( TMPFILE
, "wb" )
172 ret
= call ( cmd
, shell
= True , stdin
= ttyfd
, stdout
= ttyfd
, stderr
= tmpfd
)
175 ret
= call ( cmd
, shell
= True , stderr
= tmpfd
)
179 logging
. error ( "Should have failed, but got exit 0" )
181 lines
= get_lines ( TMPFILE
)
182 matched
= [ l
for l
in lines
if errmsg
in l
]
184 logging
. info ( "Correctly failed with message \" " + matched
[ 0 ] + " \" " )
187 logging
. error ( "Command: " + cmd
)
188 logging
. error ( "Bad messages to stderr \" " + str ( lines
) + " \" " )
189 logging
. error ( "Expected \" " + errmsg
+ " \" " )
196 return "ns {num} " . format ( num
= num
)
199 def verify ( DATADIR
, POOL
, NAME_PREFIX
, db
):
200 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
202 for rawnsfile
in [ f
for f
in os
. listdir ( DATADIR
) if f
. split ( '-' )[ 1 ]. find ( NAME_PREFIX
) == 0 ]:
203 nsfile
= rawnsfile
. split ( "__" )[ 0 ]
204 clone
= rawnsfile
. split ( "__" )[ 1 ]
205 nspace
= nsfile
. split ( "-" )[ 0 ]
206 file = nsfile
. split ( "-" )[ 1 ]
210 path
= os
. path
. join ( DATADIR
, rawnsfile
)
215 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' get {file} {out} " . format ( pool
= POOL
, file = file , out
= TMPFILE
, nspace
= nspace
, path
= CEPH_BIN
)
217 call ( cmd
, shell
= True , stdout
= DEVNULL
, stderr
= DEVNULL
)
218 cmd
= "diff -q {src} {result} " . format ( src
= path
, result
= TMPFILE
)
220 ret
= call ( cmd
, shell
= True )
222 logging
. error ( " {file} data not imported properly" . format ( file = file ))
228 for key
, val
in db
[ nspace
][ file ][ "xattr" ]. items ():
229 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getxattr {name} {key} " . format ( pool
= POOL
, name
= file , key
= key
, nspace
= nspace
, path
= CEPH_BIN
)
231 getval
= check_output ( cmd
, shell
= True , stderr
= DEVNULL
)
232 logging
. debug ( "getxattr {key} {val} " . format ( key
= key
, val
= getval
))
234 logging
. error ( "getxattr of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= key
, get
= getval
, orig
= val
))
237 hdr
= db
[ nspace
][ file ]. get ( "omapheader" , "" )
238 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getomapheader {name} {file} " . format ( pool
= POOL
, name
= file , nspace
= nspace
, file = TMPFILE
, path
= CEPH_BIN
)
240 ret
= call ( cmd
, shell
= True , stderr
= DEVNULL
)
242 logging
. error ( "rados getomapheader returned {ret} " . format ( ret
= ret
))
245 getlines
= get_lines ( TMPFILE
)
246 assert ( len ( getlines
) == 0 or len ( getlines
) == 1 )
247 if len ( getlines
) == 0 :
251 logging
. debug ( "header: {hdr} " . format ( hdr
= gethdr
))
253 logging
. error ( "getomapheader returned wrong val: {get} instead of {orig} " . format ( get
= gethdr
, orig
= hdr
))
255 for key
, val
in db
[ nspace
][ file ][ "omap" ]. items ():
256 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getomapval {name} {key} {file} " . format ( pool
= POOL
, name
= file , key
= key
, nspace
= nspace
, file = TMPFILE
, path
= CEPH_BIN
)
258 ret
= call ( cmd
, shell
= True , stderr
= DEVNULL
)
260 logging
. error ( "getomapval returned {ret} " . format ( ret
= ret
))
263 getlines
= get_lines ( TMPFILE
)
264 if len ( getlines
) != 1 :
265 logging
. error ( "Bad data from getomapval {lines} " . format ( lines
= getlines
))
269 logging
. debug ( "getomapval {key} {val} " . format ( key
= key
, val
= getval
))
271 logging
. error ( "getomapval returned wrong val: {get} instead of {orig} " . format ( get
= getval
, orig
= val
))
280 def check_journal ( jsondict
):
282 if 'header' not in jsondict
:
283 logging
. error ( "Key 'header' not in dump-journal" )
285 elif 'max_size' not in jsondict
[ 'header' ]:
286 logging
. error ( "Key 'max_size' not in dump-journal header" )
289 print ( " \t Journal max_size = {size} " . format ( size
= jsondict
[ 'header' ][ 'max_size' ]))
290 if 'entries' not in jsondict
:
291 logging
. error ( "Key 'entries' not in dump-journal output" )
293 elif len ( jsondict
[ 'entries' ]) == 0 :
294 logging
. info ( "No entries in journal found" )
296 errors
+= check_journal_entries ( jsondict
[ 'entries' ])
300 def check_journal_entries ( entries
):
302 for enum
in range ( len ( entries
)):
303 if 'offset' not in entries
[ enum
]:
304 logging
. error ( "No 'offset' key in entry {e} " . format ( e
= enum
))
306 if 'seq' not in entries
[ enum
]:
307 logging
. error ( "No 'seq' key in entry {e} " . format ( e
= enum
))
309 if 'transactions' not in entries
[ enum
]:
310 logging
. error ( "No 'transactions' key in entry {e} " . format ( e
= enum
))
312 elif len ( entries
[ enum
][ 'transactions' ]) == 0 :
313 logging
. error ( "No transactions found in entry {e} " . format ( e
= enum
))
316 errors
+= check_entry_transactions ( entries
[ enum
], enum
)
320 def check_entry_transactions ( entry
, enum
):
322 for tnum
in range ( len ( entry
[ 'transactions' ])):
323 if 'trans_num' not in entry
[ 'transactions' ][ tnum
]:
324 logging
. error ( "Key 'trans_num' missing from entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
326 elif entry
[ 'transactions' ][ tnum
][ 'trans_num' ] != tnum
:
327 ft
= entry
[ 'transactions' ][ tnum
][ 'trans_num' ]
328 logging
. error ( "Bad trans_num ( {ft} ) entry {e} trans {t} " . format ( ft
= ft
, e
= enum
, t
= tnum
))
330 if 'ops' not in entry
[ 'transactions' ][ tnum
]:
331 logging
. error ( "Key 'ops' missing from entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
334 errors
+= check_transaction_ops ( entry
[ 'transactions' ][ tnum
][ 'ops' ], enum
, tnum
)
338 def check_transaction_ops ( ops
, enum
, tnum
):
340 logging
. warning ( "No ops found in entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
342 for onum
in range ( len ( ops
)):
343 if 'op_num' not in ops
[ onum
]:
344 logging
. error ( "Key 'op_num' missing from entry {e} trans {t} op {o} " . format ( e
= enum
, t
= tnum
, o
= onum
))
346 elif ops
[ onum
][ 'op_num' ] != onum
:
347 fo
= ops
[ onum
][ 'op_num' ]
348 logging
. error ( "Bad op_num ( {fo} ) from entry {e} trans {t} op {o} " . format ( fo
= fo
, e
= enum
, t
= tnum
, o
= onum
))
350 if 'op_name' not in ops
[ onum
]:
351 logging
. error ( "Key 'op_name' missing from entry {e} trans {t} op {o} " . format ( e
= enum
, t
= tnum
, o
= onum
))
356 def test_dump_journal ( CFSD_PREFIX
, osds
):
359 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= pid
)
362 # Test --op dump-journal by loading json
363 cmd
= ( CFSD_PREFIX
+ "--op dump-journal --format json" ). format ( osd
= osd
)
365 tmpfd
= open ( TMPFILE
, "wb" )
366 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
368 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
372 tmpfd
= open ( TMPFILE
, "r" )
373 jsondict
= json
. load ( tmpfd
)
377 journal_errors
= check_journal ( jsondict
)
378 if journal_errors
is not 0 :
379 logging
. error ( jsondict
)
380 ERRORS
+= journal_errors
384 CEPH_BUILD_DIR
= os
. environ
. get ( 'CEPH_BUILD_DIR' )
385 CEPH_BIN
= os
. environ
. get ( 'CEPH_BIN' )
386 CEPH_ROOT
= os
. environ
. get ( 'CEPH_ROOT' )
388 if not CEPH_BUILD_DIR
:
389 CEPH_BUILD_DIR
= os
. getcwd ()
390 os
. putenv ( 'CEPH_BUILD_DIR' , CEPH_BUILD_DIR
)
391 CEPH_BIN
= CEPH_BUILD_DIR
392 os
. putenv ( 'CEPH_BIN' , CEPH_BIN
)
393 CEPH_ROOT
= os
. path
. dirname ( CEPH_BUILD_DIR
)
394 os
. putenv ( 'CEPH_ROOT' , CEPH_ROOT
)
395 CEPH_LIB
= os
. path
. join ( CEPH_BIN
, '.libs' )
396 os
. putenv ( 'CEPH_LIB' , CEPH_LIB
)
398 CEPH_DIR
= CEPH_BUILD_DIR
+ "/cot_dir"
399 CEPH_CONF
= os
. path
. join ( CEPH_DIR
, 'ceph.conf' )
402 call ( " {path} /init-ceph -c {conf} stop > /dev/null 2>&1" . format ( conf
= CEPH_CONF
, path
= CEPH_BIN
), shell
= True )
405 def check_data ( DATADIR
, TMPFILE
, OSDDIR
, SPLIT_NAME
):
408 for rawnsfile
in [ f
for f
in os
. listdir ( DATADIR
) if f
. split ( '-' )[ 1 ]. find ( SPLIT_NAME
) == 0 ]:
409 nsfile
= rawnsfile
. split ( "__" )[ 0 ]
410 clone
= rawnsfile
. split ( "__" )[ 1 ]
411 nspace
= nsfile
. split ( "-" )[ 0 ]
412 file = nsfile
. split ( "-" )[ 1 ] + "__" + clone
416 path
= os
. path
. join ( DATADIR
, rawnsfile
)
417 tmpfd
= open ( TMPFILE
, "wb" )
418 cmd
= "find {dir} -name ' {file} _*_ {nspace} _*'" . format ( dir = OSDDIR
, file = file , nspace
= nspace
)
420 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
422 logging
. critical ( "INTERNAL ERROR" )
425 obj_locs
= get_lines ( TMPFILE
)
426 if len ( obj_locs
) == 0 :
427 logging
. error ( "Can't find imported object {name} " . format ( name
= file ))
429 for obj_loc
in obj_locs
:
430 # For btrfs skip snap_* dirs
431 if re
. search ( "/snap_[0-9]*/" , obj_loc
) is not None :
434 cmd
= "diff -q {src} {obj_loc} " . format ( src
= path
, obj_loc
= obj_loc
)
436 ret
= call ( cmd
, shell
= True )
438 logging
. error ( " {file} data not imported properly into {obj} " . format ( file = file , obj
= obj_loc
))
440 return ERRORS
, repcount
443 def set_osd_weight ( CFSD_PREFIX
, osd_ids
, osd_path
, weight
):
444 # change the weight of osd.0 to math.pi in the newest osdmap of given osd
445 osdmap_file
= tempfile
. NamedTemporaryFile ( delete
= True )
446 cmd
= ( CFSD_PREFIX
+ "--op get-osdmap --file {osdmap_file} " ). format ( osd
= osd_path
,
447 osdmap_file
= osdmap_file
. name
)
448 output
= check_output ( cmd
, shell
= True )
449 epoch
= int ( re
. findall ( '#(\d+)' , output
)[ 0 ])
451 new_crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
452 old_crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
453 ret
= call ( " {path} /osdmaptool --export-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
454 crush_file
= old_crush_file
. name
, path
= CEPH_BIN
),
460 for osd_id
in osd_ids
:
461 cmd
= " {path} /crushtool -i {crush_file} --reweight-item osd. {osd} {weight} -o {new_crush_file} " . format ( osd
= osd_id
,
462 crush_file
= old_crush_file
. name
,
464 new_crush_file
= new_crush_file
. name
, path
= CEPH_BIN
)
465 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
467 old_crush_file
, new_crush_file
= new_crush_file
, old_crush_file
469 # change them back, since we don't need to preapre for another round
470 old_crush_file
, new_crush_file
= new_crush_file
, old_crush_file
471 old_crush_file
. close ()
473 ret
= call ( " {path} /osdmaptool --import-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
474 crush_file
= new_crush_file
. name
, path
= CEPH_BIN
),
480 # Minimum test of --dry-run by using it, but not checking anything
481 cmd
= CFSD_PREFIX
+ "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
482 cmd
= cmd
. format ( osd
= osd_path
, osdmap_file
= osdmap_file
. name
, epoch
= epoch
)
483 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
486 # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool
487 # to use use a different epoch than the one in osdmap
488 cmd
= CFSD_PREFIX
+ "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
489 cmd
= cmd
. format ( osd
= osd_path
, osdmap_file
= osdmap_file
. name
, epoch
= epoch
)
490 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
494 def get_osd_weights ( CFSD_PREFIX
, osd_ids
, osd_path
):
495 osdmap_file
= tempfile
. NamedTemporaryFile ( delete
= True )
496 cmd
= ( CFSD_PREFIX
+ "--op get-osdmap --file {osdmap_file} " ). format ( osd
= osd_path
,
497 osdmap_file
= osdmap_file
. name
)
498 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
501 # we have to read the weights from the crush map, even we can query the weights using
502 # osdmaptool, but please keep in mind, they are different:
503 # item weights in crush map versus weight associated with each osd in osdmap
504 crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
505 ret
= call ( " {path} /osdmaptool --export-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
506 crush_file
= crush_file
. name
, path
= CEPH_BIN
),
510 output
= check_output ( " {path} /crushtool --tree -i {crush_file} | tail -n {num_osd} " . format ( crush_file
= crush_file
. name
,
511 num_osd
= len ( osd_ids
), path
= CEPH_BIN
),
515 for line
in output
. strip (). split ( ' \n ' ):
517 linev
= re
. split ( '\s+' , line
)
520 print ( 'linev %s ' % linev
)
521 weights
. append ( float ( linev
[ 1 ]))
526 def test_get_set_osdmap ( CFSD_PREFIX
, osd_ids
, osd_paths
):
527 print ( "Testing get-osdmap and set-osdmap" )
530 weight
= 1 / math
. e
# just some magic number in [0, 1]
532 for osd_path
in osd_paths
:
533 if set_osd_weight ( CFSD_PREFIX
, osd_ids
, osd_path
, weight
):
534 changed
. append ( osd_path
)
536 logging
. warning ( "Failed to change the weights: {0} " . format ( osd_path
))
537 # i am pissed off if none of the store gets changed
541 for osd_path
in changed
:
542 weights
= get_osd_weights ( CFSD_PREFIX
, osd_ids
, osd_path
)
546 if any ( abs ( w
- weight
) > 1e-5 for w
in weights
):
547 logging
. warning ( "Weight is not changed: {0} != {1} " . format ( weights
, weight
))
551 def test_get_set_inc_osdmap ( CFSD_PREFIX
, osd_path
):
552 # incrementals are not used unless we need to build an MOSDMap to update
553 # OSD's peers, so an obvious way to test it is simply overwrite an epoch
554 # with a different copy, and read it back to see if it matches.
556 file_e2
= tempfile
. NamedTemporaryFile ( delete
= True )
557 cmd
= ( CFSD_PREFIX
+ "--op get-inc-osdmap --file {file} " ). format ( osd
= osd_path
,
559 output
= check_output ( cmd
, shell
= True )
560 epoch
= int ( re
. findall ( '#(\d+)' , output
)[ 0 ])
561 # backup e1 incremental before overwriting it
563 file_e1_backup
= tempfile
. NamedTemporaryFile ( delete
= True )
564 cmd
= CFSD_PREFIX
+ "--op get-inc-osdmap --epoch {epoch} --file {file} "
565 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
567 # overwrite e1 with e2
568 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --force --epoch {epoch} --file {file} "
569 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e2
. name
), shell
= True )
571 # Use dry-run to set back to e1 which shouldn't happen
572 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file} "
573 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
576 file_e1_read
= tempfile
. NamedTemporaryFile ( delete
= True )
577 cmd
= CFSD_PREFIX
+ "--op get-inc-osdmap --epoch {epoch} --file {file} "
578 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_read
. name
), shell
= True )
582 if not filecmp
. cmp ( file_e2
. name
, file_e1_read
. name
, shallow
= False ):
583 logging
. error ( "{{get,set}}-inc-osdmap mismatch {0} != {1} " . format ( file_e2
. name
, file_e1_read
. name
))
586 # revert the change with file_e1_backup
587 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --epoch {epoch} --file {file} "
588 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
590 logging
. error ( "Failed to revert the changed inc-osdmap" )
596 def test_removeall ( CFSD_PREFIX
, db
, OBJREPPGS
, REP_POOL
, CEPH_BIN
, OSDDIR
, REP_NAME
, NUM_CLONED_REP_OBJECTS
):
598 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
599 nullfd
= open ( os
. devnull
, "w" )
601 print ( "Test removeall" )
603 for nspace
in db
. keys ():
604 for basename
in db
[ nspace
]. keys ():
605 JSON
= db
[ nspace
][ basename
][ 'json' ]
607 OSDS
= get_osds ( pg
, OSDDIR
)
609 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
610 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
611 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
615 if int ( basename
. split ( REP_NAME
)[ 1 ]) <= int ( NUM_CLONED_REP_OBJECTS
):
616 cmd
= ( CFSD_PREFIX
+ "' {json} ' remove" ). format ( osd
= osd
, json
= JSON
)
617 errors
+= test_failure ( cmd
, "Snapshots are present, use removeall to delete everything" )
619 cmd
= ( CFSD_PREFIX
+ " --force --dry-run ' {json} ' remove" ). format ( osd
= osd
, json
= JSON
)
621 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
623 logging
. error ( "remove with --force failed for {json} " . format ( json
= JSON
))
626 cmd
= ( CFSD_PREFIX
+ " --dry-run ' {json} ' removeall" ). format ( osd
= osd
, json
= JSON
)
628 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
630 logging
. error ( "removeall failed for {json} " . format ( json
= JSON
))
633 cmd
= ( CFSD_PREFIX
+ " ' {json} ' removeall" ). format ( osd
= osd
, json
= JSON
)
635 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
637 logging
. error ( "removeall failed for {json} " . format ( json
= JSON
))
640 tmpfd
= open ( TMPFILE
, "w" )
641 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} --namespace {ns} {name} " ). format ( osd
= osd
, pg
= pg
, ns
= nspace
, name
= basename
)
643 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
645 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
648 lines
= get_lines ( TMPFILE
)
650 logging
. error ( "Removeall didn't remove all objects {ns} / {name} : {lines} " . format ( ns
= nspace
, name
= basename
, lines
= lines
))
654 cmd
= " {path} /rados -p {pool} rmsnap snap1" . format ( pool
= REP_POOL
, path
= CEPH_BIN
)
656 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
658 logging
. error ( "rados rmsnap failed" )
666 if sys
. version_info
[ 0 ] < 3 :
667 sys
. stdout
= stdout
= os
. fdopen ( sys
. stdout
. fileno (), 'wb' , 0 )
669 stdout
= sys
. stdout
. buffer
670 if len ( argv
) > 1 and argv
[ 1 ] == "debug" :
675 call ( "rm -fr {dir} ; mkdir {dir} " . format ( dir = CEPH_DIR
), shell
= True )
676 os
. environ
[ "CEPH_DIR" ] = CEPH_DIR
677 OSDDIR
= os
. path
. join ( CEPH_DIR
, "dev" )
678 REP_POOL
= "rep_pool"
679 REP_NAME
= "REPobject"
682 if len ( argv
) > 0 and argv
[ 0 ] == 'large' :
684 NUM_REP_OBJECTS
= 800
685 NUM_CLONED_REP_OBJECTS
= 100
688 # Larger data sets for first object per namespace
689 DATALINECOUNT
= 50000
690 # Number of objects to do xattr/omap testing on
695 NUM_CLONED_REP_OBJECTS
= 2
698 # Larger data sets for first object per namespace
700 # Number of objects to do xattr/omap testing on
704 TESTDIR
= "/tmp/test. {pid} " . format ( pid
= pid
)
705 DATADIR
= "/tmp/data. {pid} " . format ( pid
= pid
)
706 CFSD_PREFIX
= CEPH_BIN
+ "/ceph-objectstore-tool --data-path " + OSDDIR
+ "/ {osd} "
707 PROFNAME
= "testecprofile"
709 os
. environ
[ 'CEPH_CONF' ] = CEPH_CONF
713 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} replicated" . format ( pool
= REP_POOL
, pg
= PG_COUNT
, path
= CEPH_BIN
)
715 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
716 REPID
= get_pool_id ( REP_POOL
, nullfd
)
718 print ( "Created Replicated pool # {repid} " . format ( repid
= REPID
))
720 cmd
= " {path} /ceph osd erasure-code-profile set {prof} crush-failure-domain=osd" . format ( prof
= PROFNAME
, path
= CEPH_BIN
)
722 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
723 cmd
= " {path} /ceph osd erasure-code-profile get {prof} " . format ( prof
= PROFNAME
, path
= CEPH_BIN
)
725 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
726 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} erasure {prof} " . format ( pool
= EC_POOL
, prof
= PROFNAME
, pg
= PG_COUNT
, path
= CEPH_BIN
)
728 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
729 ECID
= get_pool_id ( EC_POOL
, nullfd
)
731 print ( "Created Erasure coded pool # {ecid} " . format ( ecid
= ECID
))
733 print ( "Creating {objs} objects in replicated pool" . format ( objs
=( NUM_REP_OBJECTS
* NUM_NSPACES
)))
734 cmd
= "mkdir -p {datadir} " . format ( datadir
= DATADIR
)
736 call ( cmd
, shell
= True )
740 objects
= range ( 1 , NUM_REP_OBJECTS
+ 1 )
741 nspaces
= range ( NUM_NSPACES
)
743 nspace
= get_nspace ( n
)
748 NAME
= REP_NAME
+ " {num} " . format ( num
= i
)
749 LNAME
= nspace
+ "-" + NAME
750 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
753 cmd
= "rm -f " + DDNAME
755 call ( cmd
, shell
= True )
758 dataline
= range ( DATALINECOUNT
)
761 fd
= open ( DDNAME
, "w" )
762 data
= "This is the replicated data for " + LNAME
+ " \n "
767 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= REP_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
769 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
771 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
774 db
[ nspace
][ NAME
] = {}
776 if i
< ATTR_OBJS
+ 1 :
780 db
[ nspace
][ NAME
][ "xattr" ] = {}
784 mykey
= "key {i} - {k} " . format ( i
= i
, k
= k
)
785 myval
= "val {i} - {k} " . format ( i
= i
, k
= k
)
786 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setxattr {name} {key} {val} " . format ( pool
= REP_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
788 ret
= call ( cmd
, shell
= True )
790 logging
. error ( "setxattr failed with {ret} " . format ( ret
= ret
))
792 db
[ nspace
][ NAME
][ "xattr" ][ mykey
] = myval
794 # Create omap header in all objects but REPobject1
795 if i
< ATTR_OBJS
+ 1 and i
!= 1 :
796 myhdr
= "hdr {i} " . format ( i
= i
)
797 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setomapheader {name} {hdr} " . format ( pool
= REP_POOL
, name
= NAME
, hdr
= myhdr
, nspace
= nspace
, path
= CEPH_BIN
)
799 ret
= call ( cmd
, shell
= True )
801 logging
. critical ( "setomapheader failed with {ret} " . format ( ret
= ret
))
803 db
[ nspace
][ NAME
][ "omapheader" ] = myhdr
805 db
[ nspace
][ NAME
][ "omap" ] = {}
809 mykey
= "okey {i} - {k} " . format ( i
= i
, k
= k
)
810 myval
= "oval {i} - {k} " . format ( i
= i
, k
= k
)
811 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setomapval {name} {key} {val} " . format ( pool
= REP_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
813 ret
= call ( cmd
, shell
= True )
815 logging
. critical ( "setomapval failed with {ret} " . format ( ret
= ret
))
816 db
[ nspace
][ NAME
][ "omap" ][ mykey
] = myval
819 cmd
= " {path} /rados -p {pool} mksnap snap1" . format ( pool
= REP_POOL
, path
= CEPH_BIN
)
821 call ( cmd
, shell
= True )
823 objects
= range ( 1 , NUM_CLONED_REP_OBJECTS
+ 1 )
824 nspaces
= range ( NUM_NSPACES
)
826 nspace
= get_nspace ( n
)
829 NAME
= REP_NAME
+ " {num} " . format ( num
= i
)
830 LNAME
= nspace
+ "-" + NAME
831 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
833 CLONENAME
= DDNAME
+ "__1"
836 cmd
= "mv -f " + DDNAME
+ " " + CLONENAME
838 call ( cmd
, shell
= True )
841 dataline
= range ( DATALINECOUNT
)
844 fd
= open ( DDNAME
, "w" )
845 data
= "This is the replicated data after a snapshot for " + LNAME
+ " \n "
850 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= REP_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
852 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
854 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
857 print ( "Creating {objs} objects in erasure coded pool" . format ( objs
=( NUM_EC_OBJECTS
* NUM_NSPACES
)))
859 objects
= range ( 1 , NUM_EC_OBJECTS
+ 1 )
860 nspaces
= range ( NUM_NSPACES
)
862 nspace
= get_nspace ( n
)
865 NAME
= EC_NAME
+ " {num} " . format ( num
= i
)
866 LNAME
= nspace
+ "-" + NAME
867 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
870 cmd
= "rm -f " + DDNAME
872 call ( cmd
, shell
= True )
875 dataline
= range ( DATALINECOUNT
)
878 fd
= open ( DDNAME
, "w" )
879 data
= "This is the erasure coded data for " + LNAME
+ " \n "
884 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= EC_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
886 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
888 logging
. critical ( "Erasure coded pool creation failed with {ret} " . format ( ret
= ret
))
891 db
[ nspace
][ NAME
] = {}
893 db
[ nspace
][ NAME
][ "xattr" ] = {}
894 if i
< ATTR_OBJS
+ 1 :
901 mykey
= "key {i} - {k} " . format ( i
= i
, k
= k
)
902 myval
= "val {i} - {k} " . format ( i
= i
, k
= k
)
903 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setxattr {name} {key} {val} " . format ( pool
= EC_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
905 ret
= call ( cmd
, shell
= True )
907 logging
. error ( "setxattr failed with {ret} " . format ( ret
= ret
))
909 db
[ nspace
][ NAME
][ "xattr" ][ mykey
] = myval
911 # Omap isn't supported in EC pools
912 db
[ nspace
][ NAME
][ "omap" ] = {}
919 logging
. critical ( "Unable to set up test" )
922 ALLREPPGS
= get_pgs ( OSDDIR
, REPID
)
923 logging
. debug ( ALLREPPGS
)
924 ALLECPGS
= get_pgs ( OSDDIR
, ECID
)
925 logging
. debug ( ALLECPGS
)
927 OBJREPPGS
= get_objs ( ALLREPPGS
, REP_NAME
, OSDDIR
, REPID
)
928 logging
. debug ( OBJREPPGS
)
929 OBJECPGS
= get_objs ( ALLECPGS
, EC_NAME
, OSDDIR
, ECID
)
930 logging
. debug ( OBJECPGS
)
934 osds
= get_osds ( ONEPG
, OSDDIR
)
936 logging
. debug ( ONEOSD
)
938 print ( "Test invalid parameters" )
939 # On export can't use stdout to a terminal
940 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} " ). format ( osd
= ONEOSD
, pg
= ONEPG
)
941 ERRORS
+= test_failure ( cmd
, "stdout is a tty and no --file filename specified" , tty
= True )
943 # On export can't use stdout to a terminal
944 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file -" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
945 ERRORS
+= test_failure ( cmd
, "stdout is a tty and no --file filename specified" , tty
= True )
947 # Prep a valid ec export file for import failure tests
948 ONEECPG
= ALLECPGS
[ 0 ]
949 osds
= get_osds ( ONEECPG
, OSDDIR
)
951 OTHERFILE
= "/tmp/foo. {pid} " . format ( pid
= pid
)
952 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= ONEECOSD
, pg
= ONEECPG
, file = OTHERFILE
)
954 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
956 # On import can't specify a different shard
957 BADPG
= ONEECPG
. split ( 's' )[ 0 ] + "s10"
958 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= ONEECOSD
, pg
= BADPG
, file = OTHERFILE
)
959 ERRORS
+= test_failure ( cmd
, "Can't specify a different shard, must be" )
963 # Prep a valid export file for import failure tests
964 OTHERFILE
= "/tmp/foo. {pid} " . format ( pid
= pid
)
965 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= ONEOSD
, pg
= ONEPG
, file = OTHERFILE
)
967 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
969 # On import can't specify a PG with a non-existent pool
970 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= ONEOSD
, pg
= "10.0" , file = OTHERFILE
)
971 ERRORS
+= test_failure ( cmd
, "Can't specify a different pgid pool, must be" )
973 # On import can't specify shard for a replicated export
974 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} s0 --file {file} " ). format ( osd
= ONEOSD
, pg
= ONEPG
, file = OTHERFILE
)
975 ERRORS
+= test_failure ( cmd
, "Can't specify a sharded pgid with a non-sharded export" )
977 # On import can't specify a PG with a bad seed
978 TMPPG
= " {pool} .80" . format ( pool
= REPID
)
979 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= ONEOSD
, pg
= TMPPG
, file = OTHERFILE
)
980 ERRORS
+= test_failure ( cmd
, "Illegal pgid, the seed is larger than current pg_num" )
983 cmd
= ( CFSD_PREFIX
+ "--op import --file {FOO} " ). format ( osd
= ONEOSD
, FOO
= OTHERFILE
)
984 ERRORS
+= test_failure ( cmd
, "file: {FOO} : No such file or directory" . format ( FOO
= OTHERFILE
))
986 cmd
= " {path} /ceph-objectstore-tool --data-path BAD_DATA_PATH --op list" . format ( osd
= ONEOSD
, path
= CEPH_BIN
)
987 ERRORS
+= test_failure ( cmd
, "data-path: BAD_DATA_PATH: No such file or directory" )
989 cmd
= " {path} /ceph-objectstore-tool --journal-path BAD_JOURNAL_PATH --op dump-journal" . format ( path
= CEPH_BIN
)
990 ERRORS
+= test_failure ( cmd
, "journal-path: BAD_JOURNAL_PATH: (2) No such file or directory" )
992 # On import can't use stdin from a terminal
993 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} " ). format ( osd
= ONEOSD
, pg
= ONEPG
)
994 ERRORS
+= test_failure ( cmd
, "stdin is a tty and no --file filename specified" , tty
= True )
996 # On import can't use stdin from a terminal
997 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file -" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
998 ERRORS
+= test_failure ( cmd
, "stdin is a tty and no --file filename specified" , tty
= True )
1000 # Specify a bad --type
1001 os
. mkdir ( OSDDIR
+ "/fakeosd" )
1002 cmd
= ( " {path} /ceph-objectstore-tool --data-path " + OSDDIR
+ "/ {osd} --type foobar --op list --pgid {pg} " ). format ( osd
= "fakeosd" , pg
= ONEPG
, path
= CEPH_BIN
)
1003 ERRORS
+= test_failure ( cmd
, "Unable to create store of type foobar" )
1005 # Don't specify a data-path
1006 cmd
= " {path} /ceph-objectstore-tool --type memstore --op list --pgid {pg} " . format ( dir = OSDDIR
, osd
= ONEOSD
, pg
= ONEPG
, path
= CEPH_BIN
)
1007 ERRORS
+= test_failure ( cmd
, "Must provide --data-path" )
1009 cmd
= ( CFSD_PREFIX
+ "--op remove" ). format ( osd
= ONEOSD
)
1010 ERRORS
+= test_failure ( cmd
, "Must provide pgid" )
1012 # Don't secify a --op nor object command
1013 cmd
= CFSD_PREFIX
. format ( osd
= ONEOSD
)
1014 ERRORS
+= test_failure ( cmd
, "Must provide --op or object command..." )
1016 # Specify a bad --op command
1017 cmd
= ( CFSD_PREFIX
+ "--op oops" ). format ( osd
= ONEOSD
)
1018 ERRORS
+= test_failure ( cmd
, "Must provide --op (info, log, remove, mkfs, fsck, export, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete)" )
1020 # Provide just the object param not a command
1021 cmd
= ( CFSD_PREFIX
+ "object" ). format ( osd
= ONEOSD
)
1022 ERRORS
+= test_failure ( cmd
, "Invalid syntax, missing command" )
1024 # Provide an object name that doesn't exist
1025 cmd
= ( CFSD_PREFIX
+ "NON_OBJECT get-bytes" ). format ( osd
= ONEOSD
)
1026 ERRORS
+= test_failure ( cmd
, "No object id 'NON_OBJECT' found" )
1028 # Provide an invalid object command
1029 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} '' notacommand" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1030 ERRORS
+= test_failure ( cmd
, "Unknown object command 'notacommand'" )
1032 cmd
= ( CFSD_PREFIX
+ "foo list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1033 ERRORS
+= test_failure ( cmd
, "No object id 'foo' found or invalid JSON specified" )
1035 cmd
= ( CFSD_PREFIX
+ "'{{ \" oid \" : \" obj4 \" , \" key \" : \"\" , \" snapid \" :-1, \" hash \" :2826278768, \" max \" :0, \" pool \" :1, \" namespace \" : \"\" }}' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1036 ERRORS
+= test_failure ( cmd
, "Without --pgid the object '{ \" oid \" : \" obj4 \" , \" key \" : \"\" , \" snapid \" :-1, \" hash \" :2826278768, \" max \" :0, \" pool \" :1, \" namespace \" : \"\" }' must be a JSON array" )
1038 cmd
= ( CFSD_PREFIX
+ "'[]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1039 ERRORS
+= test_failure ( cmd
, "Object '[]' must be a JSON array with 2 elements" )
1041 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.0 \" ]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1042 ERRORS
+= test_failure ( cmd
, "Object '[ \" 1.0 \" ]' must be a JSON array with 2 elements" )
1044 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.0 \" , 5, 8, 9]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1045 ERRORS
+= test_failure ( cmd
, "Object '[ \" 1.0 \" , 5, 8, 9]' must be a JSON array with 2 elements" )
1047 cmd
= ( CFSD_PREFIX
+ "'[1, 2]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1048 ERRORS
+= test_failure ( cmd
, "Object '[1, 2]' must be a JSON array with the first element a string" )
1050 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.3 \" ,{{ \" snapid \" : \" not an int \" }}]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1051 ERRORS
+= test_failure ( cmd
, "Decode object JSON error: value type is 2 not 4" )
1053 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= pid
)
1054 ALLPGS
= OBJREPPGS
+ OBJECPGS
1055 OSDS
= get_osds ( ALLPGS
[ 0 ], OSDDIR
)
1058 print ( "Test all --op dump-journal" )
1059 ALLOSDS
= [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]
1060 ERRORS
+= test_dump_journal ( CFSD_PREFIX
, ALLOSDS
)
1062 # Test --op list and generate json for all objects
1063 print ( "Test --op list variants" )
1065 # retrieve all objects from all PGs
1066 tmpfd
= open ( TMPFILE
, "wb" )
1067 cmd
= ( CFSD_PREFIX
+ "--op list --format json" ). format ( osd
= osd
)
1069 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1071 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1074 lines
= get_lines ( TMPFILE
)
1075 JSONOBJ
= sorted ( set ( lines
))
1076 ( pgid
, coll
, jsondict
) = json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1078 # retrieve all objects in a given PG
1079 tmpfd
= open ( OTHERFILE
, "ab" )
1080 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} --format json" ). format ( osd
= osd
, pg
= pgid
)
1082 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1084 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1087 lines
= get_lines ( OTHERFILE
)
1088 JSONOBJ
= sorted ( set ( lines
))
1089 ( other_pgid
, other_coll
, other_jsondict
) = json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1091 if pgid
!= other_pgid
or jsondict
!= other_jsondict
or coll
!= other_coll
:
1092 logging
. error ( "the first line of --op list is different "
1093 "from the first line of --op list --pgid {pg} " . format ( pg
= pgid
))
1096 # retrieve all objects with a given name in a given PG
1097 tmpfd
= open ( OTHERFILE
, "wb" )
1098 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} {object} --format json" ). format ( osd
= osd
, pg
= pgid
, object = jsondict
[ 'oid' ])
1100 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1102 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1105 lines
= get_lines ( OTHERFILE
)
1106 JSONOBJ
= sorted ( set ( lines
))
1107 ( other_pgid
, other_coll
, other_jsondict
) in json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1109 if pgid
!= other_pgid
or jsondict
!= other_jsondict
or coll
!= other_coll
:
1110 logging
. error ( "the first line of --op list is different "
1111 "from the first line of --op list --pgid {pg} {object} " . format ( pg
= pgid
, object = jsondict
[ 'oid' ]))
1114 print ( "Test --op list by generating json for all objects using default format" )
1116 OSDS
= get_osds ( pg
, OSDDIR
)
1118 tmpfd
= open ( TMPFILE
, "ab" )
1119 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} " ). format ( osd
= osd
, pg
= pg
)
1121 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1123 logging
. error ( "Bad exit status {ret} from --op list request" . format ( ret
= ret
))
1127 lines
= get_lines ( TMPFILE
)
1128 JSONOBJ
= sorted ( set ( lines
))
1129 for JSON
in JSONOBJ
:
1130 ( pgid
, jsondict
) = json
. loads ( JSON
)
1131 # Skip clones for now
1132 if jsondict
[ 'snapid' ] != - 2 :
1134 db
[ jsondict
[ 'namespace' ]][ jsondict
[ 'oid' ]][ 'json' ] = json
. dumps (( pgid
, jsondict
))
1135 # print db[jsondict['namespace']][jsondict['oid']]['json']
1136 if jsondict
[ 'oid' ]. find ( EC_NAME
) == 0 and 'shard_id' not in jsondict
:
1137 logging
. error ( "Malformed JSON {json} " . format ( json
= JSON
))
1141 print ( "Test get-bytes and set-bytes" )
1142 for nspace
in db
. keys ():
1143 for basename
in db
[ nspace
]. keys ():
1144 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1145 JSON
= db
[ nspace
][ basename
][ 'json' ]
1146 GETNAME
= "/tmp/getbytes. {pid} " . format ( pid
= pid
)
1147 TESTNAME
= "/tmp/testbytes. {pid} " . format ( pid
= pid
)
1148 SETNAME
= "/tmp/setbytes. {pid} " . format ( pid
= pid
)
1149 BADNAME
= "/tmp/badbytes. {pid} " . format ( pid
= pid
)
1150 for pg
in OBJREPPGS
:
1151 OSDS
= get_osds ( pg
, OSDDIR
)
1153 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1154 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1155 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1162 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-bytes {fname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, fname
= GETNAME
)
1164 ret
= call ( cmd
, shell
= True )
1166 logging
. error ( "Bad exit status {ret} " . format ( ret
= ret
))
1169 cmd
= "diff -q {file} {getfile} " . format ( file = file , getfile
= GETNAME
)
1170 ret
= call ( cmd
, shell
= True )
1172 logging
. error ( "Data from get-bytes differ" )
1173 logging
. debug ( "Got:" )
1174 cat_file ( logging
. DEBUG
, GETNAME
)
1175 logging
. debug ( "Expected:" )
1176 cat_file ( logging
. DEBUG
, file )
1178 fd
= open ( SETNAME
, "w" )
1179 data
= "put-bytes going into {file} \n " . format ( file = file )
1182 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' set-bytes {sname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, sname
= SETNAME
)
1184 ret
= call ( cmd
, shell
= True )
1186 logging
. error ( "Bad exit status {ret} from set-bytes" . format ( ret
= ret
))
1188 fd
= open ( TESTNAME
, "wb" )
1189 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-bytes -" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1191 ret
= call ( cmd
, shell
= True , stdout
= fd
)
1194 logging
. error ( "Bad exit status {ret} from get-bytes" . format ( ret
= ret
))
1196 cmd
= "diff -q {setfile} {testfile} " . format ( setfile
= SETNAME
, testfile
= TESTNAME
)
1198 ret
= call ( cmd
, shell
= True )
1200 logging
. error ( "Data after set-bytes differ" )
1201 logging
. debug ( "Got:" )
1202 cat_file ( logging
. DEBUG
, TESTNAME
)
1203 logging
. debug ( "Expected:" )
1204 cat_file ( logging
. DEBUG
, SETNAME
)
1207 # Use set-bytes with --dry-run and make sure contents haven't changed
1208 fd
= open ( BADNAME
, "w" )
1209 data
= "Bad data for --dry-run in {file} \n " . format ( file = file )
1212 cmd
= ( CFSD_PREFIX
+ "--dry-run --pgid {pg} ' {json} ' set-bytes {sname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, sname
= BADNAME
)
1214 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1216 logging
. error ( "Bad exit status {ret} from set-bytes --dry-run" . format ( ret
= ret
))
1218 fd
= open ( TESTNAME
, "wb" )
1219 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-bytes -" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1221 ret
= call ( cmd
, shell
= True , stdout
= fd
)
1224 logging
. error ( "Bad exit status {ret} from get-bytes" . format ( ret
= ret
))
1226 cmd
= "diff -q {setfile} {testfile} " . format ( setfile
= SETNAME
, testfile
= TESTNAME
)
1228 ret
= call ( cmd
, shell
= True )
1230 logging
. error ( "Data after set-bytes --dry-run changed!" )
1231 logging
. debug ( "Got:" )
1232 cat_file ( logging
. DEBUG
, TESTNAME
)
1233 logging
. debug ( "Expected:" )
1234 cat_file ( logging
. DEBUG
, SETNAME
)
1237 fd
= open ( file , "rb" )
1238 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' set-bytes" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1240 ret
= call ( cmd
, shell
= True , stdin
= fd
)
1242 logging
. error ( "Bad exit status {ret} from set-bytes to restore object" . format ( ret
= ret
))
1263 # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
1264 print ( "Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap" )
1265 for nspace
in db
. keys ():
1266 for basename
in db
[ nspace
]. keys ():
1267 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1268 JSON
= db
[ nspace
][ basename
][ 'json' ]
1269 for pg
in OBJREPPGS
:
1270 OSDS
= get_osds ( pg
, OSDDIR
)
1272 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1273 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1274 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1277 for key
, val
in db
[ nspace
][ basename
][ "xattr" ]. items ():
1279 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-attr {key} " ). format ( osd
= osd
, json
= JSON
, key
= attrkey
)
1281 getval
= check_output ( cmd
, shell
= True )
1283 logging
. error ( "get-attr of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= attrkey
, get
= getval
, orig
= val
))
1286 # set-attr to bogus value "foobar"
1287 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1289 ret
= call ( cmd
, shell
= True )
1291 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1294 # Test set-attr with dry-run
1295 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1297 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1299 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1302 # Check the set-attr
1303 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1305 getval
= check_output ( cmd
, shell
= True )
1307 logging
. error ( "Bad exit status {ret} from get-attr" . format ( ret
= ret
))
1310 if getval
!= "foobar" :
1311 logging
. error ( "Check of set-attr failed because we got {val} " . format ( val
= getval
))
1315 cmd
= ( CFSD_PREFIX
+ "' {json} ' rm-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1317 ret
= call ( cmd
, shell
= True )
1319 logging
. error ( "Bad exit status {ret} from rm-attr" . format ( ret
= ret
))
1322 # Check rm-attr with dry-run
1323 cmd
= ( CFSD_PREFIX
+ "--dry-run ' {json} ' rm-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1325 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1327 logging
. error ( "Bad exit status {ret} from rm-attr" . format ( ret
= ret
))
1330 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1332 ret
= call ( cmd
, shell
= True , stderr
= nullfd
, stdout
= nullfd
)
1334 logging
. error ( "For rm-attr expect get-attr to fail, but it succeeded" )
1337 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
, val
= val
)
1339 ret
= call ( cmd
, shell
= True )
1341 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1345 hdr
= db
[ nspace
][ basename
]. get ( "omapheader" , "" )
1346 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omaphdr" ). format ( osd
= osd
, json
= JSON
)
1348 gethdr
= check_output ( cmd
, shell
= True )
1350 logging
. error ( "get-omaphdr was wrong: {get} instead of {orig} " . format ( get
= gethdr
, orig
= hdr
))
1353 # set-omaphdr to bogus value "foobar"
1354 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ "' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1356 ret
= call ( cmd
, shell
= True )
1358 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1361 # Check the set-omaphdr
1362 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1364 gethdr
= check_output ( cmd
, shell
= True )
1366 logging
. error ( "Bad exit status {ret} from get-omaphdr" . format ( ret
= ret
))
1369 if gethdr
!= "foobar" :
1370 logging
. error ( "Check of set-omaphdr failed because we got {val} " . format ( val
= getval
))
1373 # Test dry-run with set-omaphdr
1374 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run ' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1376 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1378 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1382 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ "' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
, val
= hdr
)
1384 ret
= call ( cmd
, shell
= True )
1386 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1390 for omapkey
, val
in db
[ nspace
][ basename
][ "omap" ]. items ():
1391 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-omap {key} " ). format ( osd
= osd
, json
= JSON
, key
= omapkey
)
1393 getval
= check_output ( cmd
, shell
= True )
1395 logging
. error ( "get-omap of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= omapkey
, get
= getval
, orig
= val
))
1398 # set-omap to bogus value "foobar"
1399 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1401 ret
= call ( cmd
, shell
= True )
1403 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1406 # Check set-omap with dry-run
1407 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1409 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1411 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1414 # Check the set-omap
1415 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1417 getval
= check_output ( cmd
, shell
= True )
1419 logging
. error ( "Bad exit status {ret} from get-omap" . format ( ret
= ret
))
1422 if getval
!= "foobar" :
1423 logging
. error ( "Check of set-omap failed because we got {val} " . format ( val
= getval
))
1427 cmd
= ( CFSD_PREFIX
+ "' {json} ' rm-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1429 ret
= call ( cmd
, shell
= True )
1431 logging
. error ( "Bad exit status {ret} from rm-omap" . format ( ret
= ret
))
1433 # Check rm-omap with dry-run
1434 cmd
= ( CFSD_PREFIX
+ "--dry-run ' {json} ' rm-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1436 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1438 logging
. error ( "Bad exit status {ret} from rm-omap" . format ( ret
= ret
))
1440 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1442 ret
= call ( cmd
, shell
= True , stderr
= nullfd
, stdout
= nullfd
)
1444 logging
. error ( "For rm-omap expect get-omap to fail, but it succeeded" )
1447 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
, val
= val
)
1449 ret
= call ( cmd
, shell
= True )
1451 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1457 for nspace
in db
. keys ():
1458 for basename
in db
[ nspace
]. keys ():
1459 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1460 JSON
= db
[ nspace
][ basename
][ 'json' ]
1461 GETNAME
= "/tmp/getbytes. {pid} " . format ( pid
= pid
)
1462 for pg
in OBJREPPGS
:
1463 OSDS
= get_osds ( pg
, OSDDIR
)
1465 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1466 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1467 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1470 if int ( basename
. split ( REP_NAME
)[ 1 ]) > int ( NUM_CLONED_REP_OBJECTS
):
1472 cmd
= ( CFSD_PREFIX
+ " ' {json} ' dump | grep ' \" snap \" : 1,' > /dev/null" ). format ( osd
= osd
, json
= JSON
)
1474 ret
= call ( cmd
, shell
= True )
1476 logging
. error ( "Invalid dump for {json} " . format ( json
= JSON
))
1479 print ( "Test list-attrs get-attr" )
1480 ATTRFILE
= r
"/tmp/attrs. {pid} " . format ( pid
= pid
)
1481 VALFILE
= r
"/tmp/val. {pid} " . format ( pid
= pid
)
1482 for nspace
in db
. keys ():
1483 for basename
in db
[ nspace
]. keys ():
1484 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
)
1485 JSON
= db
[ nspace
][ basename
][ 'json' ]
1486 jsondict
= json
. loads ( JSON
)
1488 if 'shard_id' in jsondict
:
1489 logging
. debug ( "ECobject " + JSON
)
1492 OSDS
= get_osds ( pg
, OSDDIR
)
1493 # Fix shard_id since we only have one json instance for each object
1494 jsondict
[ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1495 JSON
= json
. dumps ( jsondict
)
1497 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-attr hinfo_key" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1498 logging
. debug ( "TRY: " + cmd
)
1500 out
= check_output ( cmd
, shell
= True , stderr
= subprocess
. STDOUT
)
1501 logging
. debug ( "FOUND: {json} in {osd} has value ' {val} '" . format ( osd
= osd
, json
= JSON
, val
= out
))
1503 except subprocess
. CalledProcessError
as e
:
1504 if "No such file or directory" not in e
. output
and "No data available" not in e
. output
:
1506 # Assuming k=2 m=1 for the default ec pool
1508 logging
. error ( " {json} hinfo_key found {found} times instead of 3" . format ( json
= JSON
, found
= found
))
1512 # Make sure rep obj with rep pg or ec obj with ec pg
1513 if ( 'shard_id' in jsondict
) != ( pg
. find ( 's' ) > 0 ):
1515 if 'shard_id' in jsondict
:
1516 # Fix shard_id since we only have one json instance for each object
1517 jsondict
[ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1518 JSON
= json
. dumps ( jsondict
)
1519 OSDS
= get_osds ( pg
, OSDDIR
)
1521 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1522 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1523 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1526 afd
= open ( ATTRFILE
, "wb" )
1527 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' list-attrs" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1529 ret
= call ( cmd
, shell
= True , stdout
= afd
)
1532 logging
. error ( "list-attrs failed with {ret} " . format ( ret
= ret
))
1535 keys
= get_lines ( ATTRFILE
)
1536 values
= dict ( db
[ nspace
][ basename
][ "xattr" ])
1538 if key
== "_" or key
== "snapset" or key
== "hinfo_key" :
1540 key
= key
. strip ( "_" )
1541 if key
not in values
:
1542 logging
. error ( "Unexpected key {key} present" . format ( key
= key
))
1545 exp
= values
. pop ( key
)
1546 vfd
= open ( VALFILE
, "wb" )
1547 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= "_" + key
)
1549 ret
= call ( cmd
, shell
= True , stdout
= vfd
)
1552 logging
. error ( "get-attr failed with {ret} " . format ( ret
= ret
))
1555 lines
= get_lines ( VALFILE
)
1558 logging
. error ( "For key {key} got value {got} instead of {expected} " . format ( key
= key
, got
= val
, expected
= exp
))
1560 if len ( values
) != 0 :
1561 logging
. error ( "Not all keys found, remaining keys:" )
1564 print ( "Test --op meta-list" )
1565 tmpfd
= open ( TMPFILE
, "wb" )
1566 cmd
= ( CFSD_PREFIX
+ "--op meta-list" ). format ( osd
= ONEOSD
)
1568 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1570 logging
. error ( "Bad exit status {ret} from --op meta-list request" . format ( ret
= ret
))
1573 print ( "Test get-bytes on meta" )
1575 lines
= get_lines ( TMPFILE
)
1576 JSONOBJ
= sorted ( set ( lines
))
1577 for JSON
in JSONOBJ
:
1578 ( pgid
, jsondict
) = json
. loads ( JSON
)
1580 logging
. error ( "pgid incorrect for --op meta-list {pgid} " . format ( pgid
= pgid
))
1582 if jsondict
[ 'namespace' ] != "" :
1583 logging
. error ( "namespace non null --op meta-list {ns} " . format ( ns
= jsondict
[ 'namespace' ]))
1590 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-bytes {fname} " ). format ( osd
= ONEOSD
, json
= JSON
, fname
= GETNAME
)
1592 ret
= call ( cmd
, shell
= True )
1594 logging
. error ( "Bad exit status {ret} " . format ( ret
= ret
))
1606 print ( "Test pg info" )
1607 for pg
in ALLREPPGS
+ ALLECPGS
:
1608 for osd
in get_osds ( pg
, OSDDIR
):
1609 cmd
= ( CFSD_PREFIX
+ "--op info --pgid {pg} | grep ' \" pgid \" : \" {pg} \" '" ). format ( osd
= osd
, pg
= pg
)
1611 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1613 logging
. error ( "Getting info failed for pg {pg} from {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1616 print ( "Test pg logging" )
1617 if len ( ALLREPPGS
+ ALLECPGS
) == len ( OBJREPPGS
+ OBJECPGS
):
1618 logging
. warning ( "All PGs have objects, so no log without modify entries" )
1619 for pg
in ALLREPPGS
+ ALLECPGS
:
1620 for osd
in get_osds ( pg
, OSDDIR
):
1621 tmpfd
= open ( TMPFILE
, "wb" )
1622 cmd
= ( CFSD_PREFIX
+ "--op log --pgid {pg} " ). format ( osd
= osd
, pg
= pg
)
1624 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1626 logging
. error ( "Getting log failed for pg {pg} from {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1628 HASOBJ
= pg
in OBJREPPGS
+ OBJECPGS
1630 for line
in get_lines ( TMPFILE
):
1631 if line
. find ( "modify" ) != - 1 :
1634 if HASOBJ
!= MODOBJ
:
1635 logging
. error ( "Bad log for pg {pg} from {osd} " . format ( pg
= pg
, osd
= osd
))
1636 MSG
= ( HASOBJ
and [ "" ] or [ "NOT " ])[ 0 ]
1637 print ( "Log should {msg} have a modify entry" . format ( msg
= MSG
))
1645 print ( "Test list-pgs" )
1646 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1648 CHECK_PGS
= get_osd_pgs ( os
. path
. join ( OSDDIR
, osd
), None )
1649 CHECK_PGS
= sorted ( CHECK_PGS
)
1651 cmd
= ( CFSD_PREFIX
+ "--op list-pgs" ). format ( osd
= osd
)
1653 TEST_PGS
= check_output ( cmd
, shell
= True ). split ( " \n " )
1654 TEST_PGS
= sorted ( TEST_PGS
)[ 1 :] # Skip extra blank line
1656 if TEST_PGS
!= CHECK_PGS
:
1657 logging
. error ( "list-pgs got wrong result for osd. {osd} " . format ( osd
= osd
))
1658 logging
. error ( "Expected {pgs} " . format ( pgs
= CHECK_PGS
))
1659 logging
. error ( "Got {pgs} " . format ( pgs
= TEST_PGS
))
1663 print ( "Test pg export --dry-run" )
1665 osd
= get_osds ( pg
, OSDDIR
)[ 0 ]
1666 fname
= "/tmp/fname. {pid} " . format ( pid
= pid
)
1667 cmd
= ( CFSD_PREFIX
+ "--dry-run --op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1669 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1671 logging
. error ( "Exporting --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1673 elif os
. path
. exists ( fname
):
1674 logging
. error ( "Exporting --dry-run created file" )
1677 cmd
= ( CFSD_PREFIX
+ "--dry-run --op export --pgid {pg} > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1679 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1681 logging
. error ( "Exporting --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1684 outdata
= get_lines ( fname
)
1685 if len ( outdata
) > 0 :
1686 logging
. error ( "Exporting --dry-run to stdout not empty" )
1687 logging
. error ( "Data: " + outdata
)
1691 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1692 os
. mkdir ( os
. path
. join ( TESTDIR
, osd
))
1693 print ( "Test pg export" )
1694 for pg
in ALLREPPGS
+ ALLECPGS
:
1695 for osd
in get_osds ( pg
, OSDDIR
):
1696 mydir
= os
. path
. join ( TESTDIR
, osd
)
1697 fname
= os
. path
. join ( mydir
, pg
)
1698 if pg
== ALLREPPGS
[ 0 ]:
1699 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1700 elif pg
== ALLREPPGS
[ 1 ]:
1701 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file - > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1703 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1705 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1707 logging
. error ( "Exporting failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1710 ERRORS
+= EXP_ERRORS
1712 print ( "Test pg removal" )
1714 for pg
in ALLREPPGS
+ ALLECPGS
:
1715 for osd
in get_osds ( pg
, OSDDIR
):
1716 # This should do nothing
1717 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid {pg} --dry-run" ). format ( pg
= pg
, osd
= osd
)
1719 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1721 logging
. error ( "Removing --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1723 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid {pg} " ). format ( pg
= pg
, osd
= osd
)
1725 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1727 logging
. error ( "Removing failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1733 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 :
1734 print ( "Test pg import" )
1735 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1736 dir = os
. path
. join ( TESTDIR
, osd
)
1737 PGS
= [ f
for f
in os
. listdir ( dir ) if os
. path
. isfile ( os
. path
. join ( dir , f
))]
1739 file = os
. path
. join ( dir , pg
)
1740 # This should do nothing
1741 cmd
= ( CFSD_PREFIX
+ "--op import --file {file} --dry-run" ). format ( osd
= osd
, file = file )
1743 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1745 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1748 cmd
= ( "cat {file} |" . format ( file = file ) + CFSD_PREFIX
+ "--op import" ). format ( osd
= osd
)
1750 cmd
= ( CFSD_PREFIX
+ "--op import --file - --pgid {pg} < {file} " ). format ( osd
= osd
, file = file , pg
= pg
)
1752 cmd
= ( CFSD_PREFIX
+ "--op import --file {file} " ). format ( osd
= osd
, file = file )
1754 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1756 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1759 logging
. warning ( "SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES" )
1761 ERRORS
+= IMP_ERRORS
1764 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 and IMP_ERRORS
== 0 :
1765 print ( "Verify replicated import data" )
1766 data_errors
, _
= check_data ( DATADIR
, TMPFILE
, OSDDIR
, REP_NAME
)
1767 ERRORS
+= data_errors
1769 logging
. warning ( "SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES" )
1771 print ( "Test all --op dump-journal again" )
1772 ALLOSDS
= [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]
1773 ERRORS
+= test_dump_journal ( CFSD_PREFIX
, ALLOSDS
)
1778 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 and IMP_ERRORS
== 0 :
1779 print ( "Verify erasure coded import data" )
1780 ERRORS
+= verify ( DATADIR
, EC_POOL
, EC_NAME
, db
)
1781 # Check replicated data/xattr/omap using rados
1782 print ( "Verify replicated import data using rados" )
1783 ERRORS
+= verify ( DATADIR
, REP_POOL
, REP_NAME
, db
)
1786 NEWPOOL
= "rados-import-pool"
1787 cmd
= " {path} /rados mkpool {pool} " . format ( pool
= NEWPOOL
, path
= CEPH_BIN
)
1789 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1791 print ( "Test rados import" )
1793 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1794 dir = os
. path
. join ( TESTDIR
, osd
)
1795 for pg
in [ f
for f
in os
. listdir ( dir ) if os
. path
. isfile ( os
. path
. join ( dir , f
))]:
1796 if pg
. find ( " {id} ." . format ( id = REPID
)) != 0 :
1798 file = os
. path
. join ( dir , pg
)
1801 # This should do nothing
1802 cmd
= " {path} /rados import -p {pool} --dry-run {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1804 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1806 logging
. error ( "Rados import --dry-run failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1808 cmd
= " {path} /rados -p {pool} ls" . format ( pool
= NEWPOOL
, path
= CEPH_BIN
)
1810 data
= check_output ( cmd
, shell
= True )
1812 logging
. error ( "' {data} '" . format ( data
= data
))
1813 logging
. error ( "Found objects after dry-run" )
1815 cmd
= " {path} /rados import -p {pool} {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1817 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1819 logging
. error ( "Rados import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1821 cmd
= " {path} /rados import -p {pool} --no-overwrite {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1823 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1825 logging
. error ( "Rados import --no-overwrite failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1828 ERRORS
+= verify ( DATADIR
, NEWPOOL
, REP_NAME
, db
)
1830 logging
. warning ( "SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES" )
1832 # Clear directories of previous portion
1833 call ( "/bin/rm -rf {dir} " . format ( dir = TESTDIR
), shell
= True )
1834 call ( "/bin/rm -rf {dir} " . format ( dir = DATADIR
), shell
= True )
1838 # Cause SPLIT_POOL to split and test import with object/log filtering
1839 print ( "Testing import all objects after a split" )
1840 SPLIT_POOL
= "split_pool"
1843 SPLIT_NSPACE_COUNT
= 2
1844 SPLIT_NAME
= "split"
1845 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} replicated" . format ( pool
= SPLIT_POOL
, pg
= PG_COUNT
, path
= CEPH_BIN
)
1847 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1848 SPLITID
= get_pool_id ( SPLIT_POOL
, nullfd
)
1849 pool_size
= int ( check_output ( " {path} /ceph osd pool get {pool} size" . format ( pool
= SPLIT_POOL
, path
= CEPH_BIN
), shell
= True , stderr
= nullfd
). split ( " " )[ 1 ])
1854 objects
= range ( 1 , SPLIT_OBJ_COUNT
+ 1 )
1855 nspaces
= range ( SPLIT_NSPACE_COUNT
)
1857 nspace
= get_nspace ( n
)
1860 NAME
= SPLIT_NAME
+ " {num} " . format ( num
= i
)
1861 LNAME
= nspace
+ "-" + NAME
1862 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
1865 cmd
= "rm -f " + DDNAME
1867 call ( cmd
, shell
= True )
1870 dataline
= range ( DATALINECOUNT
)
1873 fd
= open ( DDNAME
, "w" )
1874 data
= "This is the split data for " + LNAME
+ " \n "
1879 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= SPLIT_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
1881 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
1883 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
1889 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1890 os
. mkdir ( os
. path
. join ( TESTDIR
, osd
))
1892 pg
= " {pool} .0" . format ( pool
= SPLITID
)
1895 export_osds
= get_osds ( pg
, OSDDIR
)
1896 for osd
in export_osds
:
1897 mydir
= os
. path
. join ( TESTDIR
, osd
)
1898 fname
= os
. path
. join ( mydir
, pg
)
1899 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1901 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1903 logging
. error ( "Exporting failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1906 ERRORS
+= EXP_ERRORS
1912 cmd
= " {path} /ceph osd pool set {pool} pg_num 2" . format ( pool
= SPLIT_POOL
, path
= CEPH_BIN
)
1914 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1920 # Now 2 PGs, poolid.0 and poolid.1
1921 for seed
in range ( 2 ):
1922 pg
= " {pool} . {seed} " . format ( pool
= SPLITID
, seed
= seed
)
1925 for osd
in get_osds ( pg
, OSDDIR
):
1926 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid {pg} " ). format ( pg
= pg
, osd
= osd
)
1928 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1930 # This is weird. The export files are based on only the EXPORT_PG
1931 # and where that pg was before the split. Use 'which' to use all
1932 # export copies in import.
1933 mydir
= os
. path
. join ( TESTDIR
, export_osds
[ which
])
1934 fname
= os
. path
. join ( mydir
, EXPORT_PG
)
1936 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1938 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1940 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1943 ERRORS
+= IMP_ERRORS
1945 # Start up again to make sure imports didn't corrupt anything
1947 print ( "Verify split import data" )
1948 data_errors
, count
= check_data ( DATADIR
, TMPFILE
, OSDDIR
, SPLIT_NAME
)
1949 ERRORS
+= data_errors
1950 if count
!= ( SPLIT_OBJ_COUNT
* SPLIT_NSPACE_COUNT
* pool_size
):
1951 logging
. error ( "Incorrect number of replicas seen {count} " . format ( count
= count
))
1956 call ( "/bin/rm -rf {dir} " . format ( dir = TESTDIR
), shell
= True )
1957 call ( "/bin/rm -rf {dir} " . format ( dir = DATADIR
), shell
= True )
1959 ERRORS
+= test_removeall ( CFSD_PREFIX
, db
, OBJREPPGS
, REP_POOL
, CEPH_BIN
, OSDDIR
, REP_NAME
, NUM_CLONED_REP_OBJECTS
)
1961 # vstart() starts 4 OSDs
1962 ERRORS
+= test_get_set_osdmap ( CFSD_PREFIX
, list ( range ( 4 )), ALLOSDS
)
1963 ERRORS
+= test_get_set_inc_osdmap ( CFSD_PREFIX
, ALLOSDS
[ 0 ])
1965 print ( "TEST PASSED" )
1968 print ( "TEST FAILED WITH {errcount} ERRORS" . format ( errcount
= ERRORS
))
def remove_btrfs_subvolumes(path):
    """Delete any btrfs subvolumes living under *path*.

    Test teardown helper: a plain ``rm -rf`` cannot remove a btrfs
    subvolume, so every subvolume found below *path* must be deleted
    explicitly first.  No-op on FreeBSD (no btrfs) and when *path* is
    not on a btrfs filesystem.

    :param path: directory whose filesystem is inspected for subvolumes
    """
    if platform.system() == "FreeBSD":
        # btrfs does not exist on FreeBSD; nothing to clean up.
        return
    # Ask stat(1) for the filesystem type holding `path`.  The %% escapes
    # the format character from python's %-interpolation, so the shell
    # sees `stat -f -c '%T' <path>`.  universal_newlines gives text
    # output on both python 2 and 3, and communicate() reaps the child
    # (the original iterated the pipe and never waited on the process).
    proc = subprocess.Popen("stat -f -c '%%T' %s" % path, shell=True,
                            stdout=subprocess.PIPE, universal_newlines=True)
    out, _ = proc.communicate()
    lines = out.splitlines()
    # Guard against empty output: the original left `filesystem` unbound
    # (NameError) when stat produced no lines.
    filesystem = lines[-1].rstrip('\n') if lines else ""
    if filesystem != "btrfs":
        return
    proc = subprocess.Popen("sudo btrfs subvolume list %s" % path, shell=True,
                            stdout=subprocess.PIPE, universal_newlines=True)
    out, _ = proc.communicate()
    for line in out.splitlines():
        # Field 9 (index 8) of `btrfs subvolume list` output is the
        # subvolume path relative to the filesystem root.
        subvolume = line.split()[8]
        # extracting the relative volume name
        m = re.search(".*(%s.*)" % path, subvolume)
        # NOTE(review): only delete when the subvolume actually lies under
        # `path` — skip entries the regex does not match.
        if m:
            found = m.group(1)
            subprocess.call("sudo btrfs subvolume delete %s" % found,
                            shell=True)
if __name__ == "__main__":
    # Default to failure so an exception inside main() still yields a
    # non-zero exit code.
    status = 1
    try:
        status = main(sys.argv[1:])
    finally:
        # Always tear down the test directory, even when main() raises:
        # btrfs subvolumes must be deleted before rm -fr can succeed.
        remove_btrfs_subvolumes(CEPH_DIR)
        call("/bin/rm -fr {dir}".format(dir=CEPH_DIR), shell=True)
    # Propagate the test result to the caller; the original computed
    # `status` but never exited with it.
    sys.exit(status)