]>
git.proxmox.com Git - ceph.git/blob - ceph/qa/standalone/special/ceph_objectstore_tool.py
3 from __future__
import print_function
4 from subprocess
import call
6 from subprocess
import check_output
8 def check_output (* popenargs
, ** kwargs
):
10 # backported from python 2.7 stdlib
11 process
= subprocess
. Popen (
12 stdout
= subprocess
. PIPE
, * popenargs
, ** kwargs
)
13 output
, unused_err
= process
. communicate ()
14 retcode
= process
. poll ()
16 cmd
= kwargs
. get ( "args" )
19 error
= subprocess
. CalledProcessError ( retcode
, cmd
)
37 from subprocess
import DEVNULL
39 DEVNULL
= open ( os
. devnull
, "wb" )
41 logging
. basicConfig ( format
= ' %(levelname)s : %(message)s ' , level
= logging
. WARNING
)
44 if sys
. version_info
[ 0 ] >= 3 :
46 return s
. decode ( 'utf-8' )
def check_output(*args, **kwargs):
    """Run subprocess.check_output and hand the raw result to decode().

    Thin wrapper so the rest of the script always receives str output
    (decode() handles the bytes-vs-str difference across Python versions).
    """
    raw = subprocess.check_output(*args, **kwargs)
    return decode(raw)
56 def wait_for_health ():
57 print ( "Wait for health_ok..." , end
= "" )
59 while call ( " {path} /ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null" . format ( path
= CEPH_BIN
), shell
= True ) == 0 :
62 raise Exception ( "Time exceeded to go to health" )
def get_pool_id(name, nullfd):
    """Return the id of pool *name* as reported by `ceph osd pool stats`.

    nullfd: open file object used to discard the command's stderr.
    """
    stats_cmd = "{path}/ceph osd pool stats {pool}".format(
        pool=name, path=CEPH_BIN).split()
    # Output looks like "pool {pool} id <N> ..." -- the id is the 4th field.
    stats_out = check_output(stats_cmd, stderr=nullfd)
    return stats_out.split()[3]
73 # return a list of unique PGS given an osd subdirectory
74 def get_osd_pgs ( SUBDIR
, ID
):
77 endhead
= re
. compile ( " {id} .*_head$" . format ( id = ID
))
78 DIR
= os
. path
. join ( SUBDIR
, "current" )
79 PGS
+= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and ( ID
is None or endhead
. match ( f
))]
80 PGS
= [ re
. sub ( "_head" , "" , p
) for p
in PGS
if "_head" in p
]
84 # return a sorted list of unique PGs given a directory
86 OSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
89 SUBDIR
= os
. path
. join ( DIR
, d
)
90 PGS
+= get_osd_pgs ( SUBDIR
, ID
)
91 return sorted ( set ( PGS
))
94 # return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified
95 def get_objs ( ALLPGS
, prefix
, DIR
, ID
):
96 OSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
99 DIRL2
= os
. path
. join ( DIR
, d
)
100 SUBDIR
= os
. path
. join ( DIRL2
, "current" )
103 if not os
. path
. isdir ( os
. path
. join ( SUBDIR
, PGDIR
)):
105 FINALDIR
= os
. path
. join ( SUBDIR
, PGDIR
)
106 # See if there are any objects there
107 if any ( f
for f
in [ val
for _
, _
, fl
in os
. walk ( FINALDIR
) for val
in fl
] if f
. startswith ( prefix
)):
109 return sorted ( set ( PGS
))
112 # return a sorted list of OSDS which have data from a given PG
113 def get_osds ( PG
, DIR
):
114 ALLOSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
117 DIRL2
= os
. path
. join ( DIR
, d
)
118 SUBDIR
= os
. path
. join ( DIRL2
, "current" )
120 if not os
. path
. isdir ( os
. path
. join ( SUBDIR
, PGDIR
)):
126 def get_lines ( filename
):
127 tmpfd
= open ( filename
, "r" )
131 line
= tmpfd
. readline (). rstrip ( ' \n ' )
139 def cat_file ( level
, filename
):
140 if level
< logging
. getLogger (). getEffectiveLevel ():
142 print ( "File: " + filename
)
143 with
open ( filename
, "r" ) as f
:
145 line
= f
. readline (). rstrip ( ' \n ' )
152 def vstart ( new
, opt
= "" ):
153 print ( "vstarting...." , end
= "" )
154 NEW
= new
and "-n" or "-N"
155 call ( "MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 {path} /src/vstart.sh --short -l {new} -d {opt} > /dev/null 2>&1" . format ( new
= NEW
, opt
= opt
, path
= CEPH_ROOT
), shell
= True )
159 def test_failure ( cmd
, errmsg
, tty
= False ):
162 ttyfd
= open ( "/dev/tty" , "rwb" )
163 except Exception as e
:
165 logging
. info ( "SKIP " + cmd
)
167 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
168 tmpfd
= open ( TMPFILE
, "wb" )
172 ret
= call ( cmd
, shell
= True , stdin
= ttyfd
, stdout
= ttyfd
, stderr
= tmpfd
)
175 ret
= call ( cmd
, shell
= True , stderr
= tmpfd
)
179 logging
. error ( "Should have failed, but got exit 0" )
181 lines
= get_lines ( TMPFILE
)
182 matched
= [ l
for l
in lines
if errmsg
in l
]
184 logging
. info ( "Correctly failed with message \" " + matched
[ 0 ] + " \" " )
187 logging
. error ( "Command: " + cmd
)
188 logging
. error ( "Bad messages to stderr \" " + str ( lines
) + " \" " )
189 logging
. error ( "Expected \" " + errmsg
+ " \" " )
196 return "ns {num} " . format ( num
= num
)
199 def verify ( DATADIR
, POOL
, NAME_PREFIX
, db
):
200 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
202 for rawnsfile
in [ f
for f
in os
. listdir ( DATADIR
) if f
. split ( '-' )[ 1 ]. find ( NAME_PREFIX
) == 0 ]:
203 nsfile
= rawnsfile
. split ( "__" )[ 0 ]
204 clone
= rawnsfile
. split ( "__" )[ 1 ]
205 nspace
= nsfile
. split ( "-" )[ 0 ]
206 file = nsfile
. split ( "-" )[ 1 ]
210 path
= os
. path
. join ( DATADIR
, rawnsfile
)
215 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' get {file} {out} " . format ( pool
= POOL
, file = file , out
= TMPFILE
, nspace
= nspace
, path
= CEPH_BIN
)
217 call ( cmd
, shell
= True , stdout
= DEVNULL
, stderr
= DEVNULL
)
218 cmd
= "diff -q {src} {result} " . format ( src
= path
, result
= TMPFILE
)
220 ret
= call ( cmd
, shell
= True )
222 logging
. error ( " {file} data not imported properly" . format ( file = file ))
228 for key
, val
in db
[ nspace
][ file ][ "xattr" ]. items ():
229 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getxattr {name} {key} " . format ( pool
= POOL
, name
= file , key
= key
, nspace
= nspace
, path
= CEPH_BIN
)
231 getval
= check_output ( cmd
, shell
= True , stderr
= DEVNULL
)
232 logging
. debug ( "getxattr {key} {val} " . format ( key
= key
, val
= getval
))
234 logging
. error ( "getxattr of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= key
, get
= getval
, orig
= val
))
237 hdr
= db
[ nspace
][ file ]. get ( "omapheader" , "" )
238 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getomapheader {name} {file} " . format ( pool
= POOL
, name
= file , nspace
= nspace
, file = TMPFILE
, path
= CEPH_BIN
)
240 ret
= call ( cmd
, shell
= True , stderr
= DEVNULL
)
242 logging
. error ( "rados getomapheader returned {ret} " . format ( ret
= ret
))
245 getlines
= get_lines ( TMPFILE
)
246 assert ( len ( getlines
) == 0 or len ( getlines
) == 1 )
247 if len ( getlines
) == 0 :
251 logging
. debug ( "header: {hdr} " . format ( hdr
= gethdr
))
253 logging
. error ( "getomapheader returned wrong val: {get} instead of {orig} " . format ( get
= gethdr
, orig
= hdr
))
255 for key
, val
in db
[ nspace
][ file ][ "omap" ]. items ():
256 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getomapval {name} {key} {file} " . format ( pool
= POOL
, name
= file , key
= key
, nspace
= nspace
, file = TMPFILE
, path
= CEPH_BIN
)
258 ret
= call ( cmd
, shell
= True , stderr
= DEVNULL
)
260 logging
. error ( "getomapval returned {ret} " . format ( ret
= ret
))
263 getlines
= get_lines ( TMPFILE
)
264 if len ( getlines
) != 1 :
265 logging
. error ( "Bad data from getomapval {lines} " . format ( lines
= getlines
))
269 logging
. debug ( "getomapval {key} {val} " . format ( key
= key
, val
= getval
))
271 logging
. error ( "getomapval returned wrong val: {get} instead of {orig} " . format ( get
= getval
, orig
= val
))
280 def check_journal ( jsondict
):
282 if 'header' not in jsondict
:
283 logging
. error ( "Key 'header' not in dump-journal" )
285 elif 'max_size' not in jsondict
[ 'header' ]:
286 logging
. error ( "Key 'max_size' not in dump-journal header" )
289 print ( " \t Journal max_size = {size} " . format ( size
= jsondict
[ 'header' ][ 'max_size' ]))
290 if 'entries' not in jsondict
:
291 logging
. error ( "Key 'entries' not in dump-journal output" )
293 elif len ( jsondict
[ 'entries' ]) == 0 :
294 logging
. info ( "No entries in journal found" )
296 errors
+= check_journal_entries ( jsondict
[ 'entries' ])
300 def check_journal_entries ( entries
):
302 for enum
in range ( len ( entries
)):
303 if 'offset' not in entries
[ enum
]:
304 logging
. error ( "No 'offset' key in entry {e} " . format ( e
= enum
))
306 if 'seq' not in entries
[ enum
]:
307 logging
. error ( "No 'seq' key in entry {e} " . format ( e
= enum
))
309 if 'transactions' not in entries
[ enum
]:
310 logging
. error ( "No 'transactions' key in entry {e} " . format ( e
= enum
))
312 elif len ( entries
[ enum
][ 'transactions' ]) == 0 :
313 logging
. error ( "No transactions found in entry {e} " . format ( e
= enum
))
316 errors
+= check_entry_transactions ( entries
[ enum
], enum
)
320 def check_entry_transactions ( entry
, enum
):
322 for tnum
in range ( len ( entry
[ 'transactions' ])):
323 if 'trans_num' not in entry
[ 'transactions' ][ tnum
]:
324 logging
. error ( "Key 'trans_num' missing from entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
326 elif entry
[ 'transactions' ][ tnum
][ 'trans_num' ] != tnum
:
327 ft
= entry
[ 'transactions' ][ tnum
][ 'trans_num' ]
328 logging
. error ( "Bad trans_num ( {ft} ) entry {e} trans {t} " . format ( ft
= ft
, e
= enum
, t
= tnum
))
330 if 'ops' not in entry
[ 'transactions' ][ tnum
]:
331 logging
. error ( "Key 'ops' missing from entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
334 errors
+= check_transaction_ops ( entry
[ 'transactions' ][ tnum
][ 'ops' ], enum
, tnum
)
338 def check_transaction_ops ( ops
, enum
, tnum
):
340 logging
. warning ( "No ops found in entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
342 for onum
in range ( len ( ops
)):
343 if 'op_num' not in ops
[ onum
]:
344 logging
. error ( "Key 'op_num' missing from entry {e} trans {t} op {o} " . format ( e
= enum
, t
= tnum
, o
= onum
))
346 elif ops
[ onum
][ 'op_num' ] != onum
:
347 fo
= ops
[ onum
][ 'op_num' ]
348 logging
. error ( "Bad op_num ( {fo} ) from entry {e} trans {t} op {o} " . format ( fo
= fo
, e
= enum
, t
= tnum
, o
= onum
))
350 if 'op_name' not in ops
[ onum
]:
351 logging
. error ( "Key 'op_name' missing from entry {e} trans {t} op {o} " . format ( e
= enum
, t
= tnum
, o
= onum
))
356 def test_dump_journal ( CFSD_PREFIX
, osds
):
359 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= pid
)
362 # Test --op dump-journal by loading json
363 cmd
= ( CFSD_PREFIX
+ "--op dump-journal --format json" ). format ( osd
= osd
)
365 tmpfd
= open ( TMPFILE
, "wb" )
366 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
368 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
372 tmpfd
= open ( TMPFILE
, "r" )
373 jsondict
= json
. load ( tmpfd
)
377 journal_errors
= check_journal ( jsondict
)
378 if journal_errors
is not 0 :
379 logging
. error ( jsondict
)
380 ERRORS
+= journal_errors
384 CEPH_BUILD_DIR
= os
. environ
. get ( 'CEPH_BUILD_DIR' )
385 CEPH_BIN
= os
. environ
. get ( 'CEPH_BIN' )
386 CEPH_ROOT
= os
. environ
. get ( 'CEPH_ROOT' )
388 if not CEPH_BUILD_DIR
:
389 CEPH_BUILD_DIR
= os
. getcwd ()
390 os
. putenv ( 'CEPH_BUILD_DIR' , CEPH_BUILD_DIR
)
391 CEPH_BIN
= os
. path
. join ( CEPH_BUILD_DIR
, 'bin' )
392 os
. putenv ( 'CEPH_BIN' , CEPH_BIN
)
393 CEPH_ROOT
= os
. path
. dirname ( CEPH_BUILD_DIR
)
394 os
. putenv ( 'CEPH_ROOT' , CEPH_ROOT
)
395 CEPH_LIB
= os
. path
. join ( CEPH_BUILD_DIR
, 'lib' )
396 os
. putenv ( 'CEPH_LIB' , CEPH_LIB
)
401 pass # ok if this is already there
402 CEPH_DIR
= os
. path
. join ( CEPH_BUILD_DIR
, os
. path
. join ( "td" , "cot_dir" ))
403 CEPH_CONF
= os
. path
. join ( CEPH_DIR
, 'ceph.conf' )
406 call ( " {path} /init-ceph -c {conf} stop > /dev/null 2>&1" . format ( conf
= CEPH_CONF
, path
= CEPH_BIN
), shell
= True )
409 def check_data ( DATADIR
, TMPFILE
, OSDDIR
, SPLIT_NAME
):
412 for rawnsfile
in [ f
for f
in os
. listdir ( DATADIR
) if f
. split ( '-' )[ 1 ]. find ( SPLIT_NAME
) == 0 ]:
413 nsfile
= rawnsfile
. split ( "__" )[ 0 ]
414 clone
= rawnsfile
. split ( "__" )[ 1 ]
415 nspace
= nsfile
. split ( "-" )[ 0 ]
416 file = nsfile
. split ( "-" )[ 1 ] + "__" + clone
420 path
= os
. path
. join ( DATADIR
, rawnsfile
)
421 tmpfd
= open ( TMPFILE
, "wb" )
422 cmd
= "find {dir} -name ' {file} _*_ {nspace} _*'" . format ( dir = OSDDIR
, file = file , nspace
= nspace
)
424 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
426 logging
. critical ( "INTERNAL ERROR" )
429 obj_locs
= get_lines ( TMPFILE
)
430 if len ( obj_locs
) == 0 :
431 logging
. error ( "Can't find imported object {name} " . format ( name
= file ))
433 for obj_loc
in obj_locs
:
434 # For btrfs skip snap_* dirs
435 if re
. search ( "/snap_[0-9]*/" , obj_loc
) is not None :
438 cmd
= "diff -q {src} {obj_loc} " . format ( src
= path
, obj_loc
= obj_loc
)
440 ret
= call ( cmd
, shell
= True )
442 logging
. error ( " {file} data not imported properly into {obj} " . format ( file = file , obj
= obj_loc
))
444 return ERRORS
, repcount
447 def set_osd_weight ( CFSD_PREFIX
, osd_ids
, osd_path
, weight
):
448 # change the weight of osd.0 to math.pi in the newest osdmap of given osd
449 osdmap_file
= tempfile
. NamedTemporaryFile ( delete
= True )
450 cmd
= ( CFSD_PREFIX
+ "--op get-osdmap --file {osdmap_file} " ). format ( osd
= osd_path
,
451 osdmap_file
= osdmap_file
. name
)
452 output
= check_output ( cmd
, shell
= True )
453 epoch
= int ( re
. findall ( '#(\d+)' , output
)[ 0 ])
455 new_crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
456 old_crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
457 ret
= call ( " {path} /osdmaptool --export-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
458 crush_file
= old_crush_file
. name
, path
= CEPH_BIN
),
464 for osd_id
in osd_ids
:
465 cmd
= " {path} /crushtool -i {crush_file} --reweight-item osd. {osd} {weight} -o {new_crush_file} " . format ( osd
= osd_id
,
466 crush_file
= old_crush_file
. name
,
468 new_crush_file
= new_crush_file
. name
, path
= CEPH_BIN
)
469 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
471 old_crush_file
, new_crush_file
= new_crush_file
, old_crush_file
473 # change them back, since we don't need to preapre for another round
474 old_crush_file
, new_crush_file
= new_crush_file
, old_crush_file
475 old_crush_file
. close ()
477 ret
= call ( " {path} /osdmaptool --import-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
478 crush_file
= new_crush_file
. name
, path
= CEPH_BIN
),
484 # Minimum test of --dry-run by using it, but not checking anything
485 cmd
= CFSD_PREFIX
+ "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
486 cmd
= cmd
. format ( osd
= osd_path
, osdmap_file
= osdmap_file
. name
, epoch
= epoch
)
487 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
490 # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool
491 # to use use a different epoch than the one in osdmap
492 cmd
= CFSD_PREFIX
+ "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
493 cmd
= cmd
. format ( osd
= osd_path
, osdmap_file
= osdmap_file
. name
, epoch
= epoch
)
494 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
498 def get_osd_weights ( CFSD_PREFIX
, osd_ids
, osd_path
):
499 osdmap_file
= tempfile
. NamedTemporaryFile ( delete
= True )
500 cmd
= ( CFSD_PREFIX
+ "--op get-osdmap --file {osdmap_file} " ). format ( osd
= osd_path
,
501 osdmap_file
= osdmap_file
. name
)
502 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
505 # we have to read the weights from the crush map, even we can query the weights using
506 # osdmaptool, but please keep in mind, they are different:
507 # item weights in crush map versus weight associated with each osd in osdmap
508 crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
509 ret
= call ( " {path} /osdmaptool --export-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
510 crush_file
= crush_file
. name
, path
= CEPH_BIN
),
514 output
= check_output ( " {path} /crushtool --tree -i {crush_file} | tail -n {num_osd} " . format ( crush_file
= crush_file
. name
,
515 num_osd
= len ( osd_ids
), path
= CEPH_BIN
),
519 for line
in output
. strip (). split ( ' \n ' ):
521 linev
= re
. split ( '\s+' , line
)
524 print ( 'linev %s ' % linev
)
525 weights
. append ( float ( linev
[ 2 ]))
530 def test_get_set_osdmap ( CFSD_PREFIX
, osd_ids
, osd_paths
):
531 print ( "Testing get-osdmap and set-osdmap" )
534 weight
= 1 / math
. e
# just some magic number in [0, 1]
536 for osd_path
in osd_paths
:
537 if set_osd_weight ( CFSD_PREFIX
, osd_ids
, osd_path
, weight
):
538 changed
. append ( osd_path
)
540 logging
. warning ( "Failed to change the weights: {0} " . format ( osd_path
))
541 # i am pissed off if none of the store gets changed
545 for osd_path
in changed
:
546 weights
= get_osd_weights ( CFSD_PREFIX
, osd_ids
, osd_path
)
550 if any ( abs ( w
- weight
) > 1e-5 for w
in weights
):
551 logging
. warning ( "Weight is not changed: {0} != {1} " . format ( weights
, weight
))
555 def test_get_set_inc_osdmap ( CFSD_PREFIX
, osd_path
):
556 # incrementals are not used unless we need to build an MOSDMap to update
557 # OSD's peers, so an obvious way to test it is simply overwrite an epoch
558 # with a different copy, and read it back to see if it matches.
560 file_e2
= tempfile
. NamedTemporaryFile ( delete
= True )
561 cmd
= ( CFSD_PREFIX
+ "--op get-inc-osdmap --file {file} " ). format ( osd
= osd_path
,
563 output
= check_output ( cmd
, shell
= True )
564 epoch
= int ( re
. findall ( '#(\d+)' , output
)[ 0 ])
565 # backup e1 incremental before overwriting it
567 file_e1_backup
= tempfile
. NamedTemporaryFile ( delete
= True )
568 cmd
= CFSD_PREFIX
+ "--op get-inc-osdmap --epoch {epoch} --file {file} "
569 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
571 # overwrite e1 with e2
572 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --force --epoch {epoch} --file {file} "
573 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e2
. name
), shell
= True )
575 # Use dry-run to set back to e1 which shouldn't happen
576 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file} "
577 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
580 file_e1_read
= tempfile
. NamedTemporaryFile ( delete
= True )
581 cmd
= CFSD_PREFIX
+ "--op get-inc-osdmap --epoch {epoch} --file {file} "
582 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_read
. name
), shell
= True )
586 if not filecmp
. cmp ( file_e2
. name
, file_e1_read
. name
, shallow
= False ):
587 logging
. error ( "{{get,set}}-inc-osdmap mismatch {0} != {1} " . format ( file_e2
. name
, file_e1_read
. name
))
590 # revert the change with file_e1_backup
591 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --epoch {epoch} --file {file} "
592 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
594 logging
. error ( "Failed to revert the changed inc-osdmap" )
600 def test_removeall ( CFSD_PREFIX
, db
, OBJREPPGS
, REP_POOL
, CEPH_BIN
, OSDDIR
, REP_NAME
, NUM_CLONED_REP_OBJECTS
):
602 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
603 nullfd
= open ( os
. devnull
, "w" )
605 print ( "Test removeall" )
607 for nspace
in db
. keys ():
608 for basename
in db
[ nspace
]. keys ():
609 JSON
= db
[ nspace
][ basename
][ 'json' ]
611 OSDS
= get_osds ( pg
, OSDDIR
)
613 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
614 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
615 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
619 if int ( basename
. split ( REP_NAME
)[ 1 ]) <= int ( NUM_CLONED_REP_OBJECTS
):
620 cmd
= ( CFSD_PREFIX
+ "' {json} ' remove" ). format ( osd
= osd
, json
= JSON
)
621 errors
+= test_failure ( cmd
, "Snapshots are present, use removeall to delete everything" )
623 cmd
= ( CFSD_PREFIX
+ " --force --dry-run ' {json} ' remove" ). format ( osd
= osd
, json
= JSON
)
625 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
627 logging
. error ( "remove with --force failed for {json} " . format ( json
= JSON
))
630 cmd
= ( CFSD_PREFIX
+ " --dry-run ' {json} ' removeall" ). format ( osd
= osd
, json
= JSON
)
632 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
634 logging
. error ( "removeall failed for {json} " . format ( json
= JSON
))
637 cmd
= ( CFSD_PREFIX
+ " ' {json} ' removeall" ). format ( osd
= osd
, json
= JSON
)
639 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
641 logging
. error ( "removeall failed for {json} " . format ( json
= JSON
))
644 tmpfd
= open ( TMPFILE
, "w" )
645 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} --namespace {ns} {name} " ). format ( osd
= osd
, pg
= pg
, ns
= nspace
, name
= basename
)
647 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
649 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
652 lines
= get_lines ( TMPFILE
)
654 logging
. error ( "Removeall didn't remove all objects {ns} / {name} : {lines} " . format ( ns
= nspace
, name
= basename
, lines
= lines
))
658 cmd
= " {path} /rados -p {pool} rmsnap snap1" . format ( pool
= REP_POOL
, path
= CEPH_BIN
)
660 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
662 logging
. error ( "rados rmsnap failed" )
670 if sys
. version_info
[ 0 ] < 3 :
671 sys
. stdout
= stdout
= os
. fdopen ( sys
. stdout
. fileno (), 'wb' , 0 )
673 stdout
= sys
. stdout
. buffer
674 if len ( argv
) > 1 and argv
[ 1 ] == "debug" :
679 call ( "rm -fr {dir} ; mkdir -p {dir} " . format ( dir = CEPH_DIR
), shell
= True )
681 os
. environ
[ "CEPH_DIR" ] = CEPH_DIR
683 REP_POOL
= "rep_pool"
684 REP_NAME
= "REPobject"
687 if len ( argv
) > 0 and argv
[ 0 ] == 'large' :
689 NUM_REP_OBJECTS
= 800
690 NUM_CLONED_REP_OBJECTS
= 100
693 # Larger data sets for first object per namespace
694 DATALINECOUNT
= 50000
695 # Number of objects to do xattr/omap testing on
700 NUM_CLONED_REP_OBJECTS
= 2
703 # Larger data sets for first object per namespace
705 # Number of objects to do xattr/omap testing on
709 TESTDIR
= "/tmp/test. {pid} " . format ( pid
= pid
)
710 DATADIR
= "/tmp/data. {pid} " . format ( pid
= pid
)
711 CFSD_PREFIX
= CEPH_BIN
+ "/ceph-objectstore-tool --data-path " + OSDDIR
+ "/ {osd} "
712 PROFNAME
= "testecprofile"
714 os
. environ
[ 'CEPH_CONF' ] = CEPH_CONF
718 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} replicated" . format ( pool
= REP_POOL
, pg
= PG_COUNT
, path
= CEPH_BIN
)
720 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
722 REPID
= get_pool_id ( REP_POOL
, nullfd
)
724 print ( "Created Replicated pool # {repid} " . format ( repid
= REPID
))
726 cmd
= " {path} /ceph osd erasure-code-profile set {prof} crush-failure-domain=osd" . format ( prof
= PROFNAME
, path
= CEPH_BIN
)
728 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
729 cmd
= " {path} /ceph osd erasure-code-profile get {prof} " . format ( prof
= PROFNAME
, path
= CEPH_BIN
)
731 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
732 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} erasure {prof} " . format ( pool
= EC_POOL
, prof
= PROFNAME
, pg
= PG_COUNT
, path
= CEPH_BIN
)
734 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
735 ECID
= get_pool_id ( EC_POOL
, nullfd
)
737 print ( "Created Erasure coded pool # {ecid} " . format ( ecid
= ECID
))
739 print ( "Creating {objs} objects in replicated pool" . format ( objs
=( NUM_REP_OBJECTS
* NUM_NSPACES
)))
740 cmd
= "mkdir -p {datadir} " . format ( datadir
= DATADIR
)
742 call ( cmd
, shell
= True )
746 objects
= range ( 1 , NUM_REP_OBJECTS
+ 1 )
747 nspaces
= range ( NUM_NSPACES
)
749 nspace
= get_nspace ( n
)
754 NAME
= REP_NAME
+ " {num} " . format ( num
= i
)
755 LNAME
= nspace
+ "-" + NAME
756 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
759 cmd
= "rm -f " + DDNAME
761 call ( cmd
, shell
= True )
764 dataline
= range ( DATALINECOUNT
)
767 fd
= open ( DDNAME
, "w" )
768 data
= "This is the replicated data for " + LNAME
+ " \n "
773 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= REP_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
775 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
777 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
780 db
[ nspace
][ NAME
] = {}
782 if i
< ATTR_OBJS
+ 1 :
786 db
[ nspace
][ NAME
][ "xattr" ] = {}
790 mykey
= "key {i} - {k} " . format ( i
= i
, k
= k
)
791 myval
= "val {i} - {k} " . format ( i
= i
, k
= k
)
792 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setxattr {name} {key} {val} " . format ( pool
= REP_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
794 ret
= call ( cmd
, shell
= True )
796 logging
. error ( "setxattr failed with {ret} " . format ( ret
= ret
))
798 db
[ nspace
][ NAME
][ "xattr" ][ mykey
] = myval
800 # Create omap header in all objects but REPobject1
801 if i
< ATTR_OBJS
+ 1 and i
!= 1 :
802 myhdr
= "hdr {i} " . format ( i
= i
)
803 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setomapheader {name} {hdr} " . format ( pool
= REP_POOL
, name
= NAME
, hdr
= myhdr
, nspace
= nspace
, path
= CEPH_BIN
)
805 ret
= call ( cmd
, shell
= True )
807 logging
. critical ( "setomapheader failed with {ret} " . format ( ret
= ret
))
809 db
[ nspace
][ NAME
][ "omapheader" ] = myhdr
811 db
[ nspace
][ NAME
][ "omap" ] = {}
815 mykey
= "okey {i} - {k} " . format ( i
= i
, k
= k
)
816 myval
= "oval {i} - {k} " . format ( i
= i
, k
= k
)
817 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setomapval {name} {key} {val} " . format ( pool
= REP_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
819 ret
= call ( cmd
, shell
= True )
821 logging
. critical ( "setomapval failed with {ret} " . format ( ret
= ret
))
822 db
[ nspace
][ NAME
][ "omap" ][ mykey
] = myval
825 cmd
= " {path} /rados -p {pool} mksnap snap1" . format ( pool
= REP_POOL
, path
= CEPH_BIN
)
827 call ( cmd
, shell
= True )
829 objects
= range ( 1 , NUM_CLONED_REP_OBJECTS
+ 1 )
830 nspaces
= range ( NUM_NSPACES
)
832 nspace
= get_nspace ( n
)
835 NAME
= REP_NAME
+ " {num} " . format ( num
= i
)
836 LNAME
= nspace
+ "-" + NAME
837 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
839 CLONENAME
= DDNAME
+ "__1"
842 cmd
= "mv -f " + DDNAME
+ " " + CLONENAME
844 call ( cmd
, shell
= True )
847 dataline
= range ( DATALINECOUNT
)
850 fd
= open ( DDNAME
, "w" )
851 data
= "This is the replicated data after a snapshot for " + LNAME
+ " \n "
856 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= REP_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
858 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
860 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
863 print ( "Creating {objs} objects in erasure coded pool" . format ( objs
=( NUM_EC_OBJECTS
* NUM_NSPACES
)))
865 objects
= range ( 1 , NUM_EC_OBJECTS
+ 1 )
866 nspaces
= range ( NUM_NSPACES
)
868 nspace
= get_nspace ( n
)
871 NAME
= EC_NAME
+ " {num} " . format ( num
= i
)
872 LNAME
= nspace
+ "-" + NAME
873 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
876 cmd
= "rm -f " + DDNAME
878 call ( cmd
, shell
= True )
881 dataline
= range ( DATALINECOUNT
)
884 fd
= open ( DDNAME
, "w" )
885 data
= "This is the erasure coded data for " + LNAME
+ " \n "
890 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= EC_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
892 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
894 logging
. critical ( "Erasure coded pool creation failed with {ret} " . format ( ret
= ret
))
897 db
[ nspace
][ NAME
] = {}
899 db
[ nspace
][ NAME
][ "xattr" ] = {}
900 if i
< ATTR_OBJS
+ 1 :
907 mykey
= "key {i} - {k} " . format ( i
= i
, k
= k
)
908 myval
= "val {i} - {k} " . format ( i
= i
, k
= k
)
909 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setxattr {name} {key} {val} " . format ( pool
= EC_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
911 ret
= call ( cmd
, shell
= True )
913 logging
. error ( "setxattr failed with {ret} " . format ( ret
= ret
))
915 db
[ nspace
][ NAME
][ "xattr" ][ mykey
] = myval
917 # Omap isn't supported in EC pools
918 db
[ nspace
][ NAME
][ "omap" ] = {}
925 logging
. critical ( "Unable to set up test" )
928 ALLREPPGS
= get_pgs ( OSDDIR
, REPID
)
929 logging
. debug ( ALLREPPGS
)
930 ALLECPGS
= get_pgs ( OSDDIR
, ECID
)
931 logging
. debug ( ALLECPGS
)
933 OBJREPPGS
= get_objs ( ALLREPPGS
, REP_NAME
, OSDDIR
, REPID
)
934 logging
. debug ( OBJREPPGS
)
935 OBJECPGS
= get_objs ( ALLECPGS
, EC_NAME
, OSDDIR
, ECID
)
936 logging
. debug ( OBJECPGS
)
940 osds
= get_osds ( ONEPG
, OSDDIR
)
942 logging
. debug ( ONEOSD
)
944 print ( "Test invalid parameters" )
945 # On export can't use stdout to a terminal
946 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} " ). format ( osd
= ONEOSD
, pg
= ONEPG
)
947 ERRORS
+= test_failure ( cmd
, "stdout is a tty and no --file filename specified" , tty
= True )
949 # On export can't use stdout to a terminal
950 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file -" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
951 ERRORS
+= test_failure ( cmd
, "stdout is a tty and no --file filename specified" , tty
= True )
953 # Prep a valid ec export file for import failure tests
954 ONEECPG
= ALLECPGS
[ 0 ]
955 osds
= get_osds ( ONEECPG
, OSDDIR
)
957 OTHERFILE
= "/tmp/foo. {pid} " . format ( pid
= pid
)
958 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= ONEECOSD
, pg
= ONEECPG
, file = OTHERFILE
)
960 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
962 # On import can't specify a different shard
963 BADPG
= ONEECPG
. split ( 's' )[ 0 ] + "s10"
964 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= ONEECOSD
, pg
= BADPG
, file = OTHERFILE
)
965 ERRORS
+= test_failure ( cmd
, "Can't specify a different shard, must be" )
969 # Prep a valid export file for import failure tests
970 OTHERFILE
= "/tmp/foo. {pid} " . format ( pid
= pid
)
971 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= ONEOSD
, pg
= ONEPG
, file = OTHERFILE
)
973 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
975 # On import can't specify a PG with a non-existent pool
976 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= ONEOSD
, pg
= "10.0" , file = OTHERFILE
)
977 ERRORS
+= test_failure ( cmd
, "Can't specify a different pgid pool, must be" )
979 # On import can't specify shard for a replicated export
980 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} s0 --file {file} " ). format ( osd
= ONEOSD
, pg
= ONEPG
, file = OTHERFILE
)
981 ERRORS
+= test_failure ( cmd
, "Can't specify a sharded pgid with a non-sharded export" )
983 # On import can't specify a PG with a bad seed
984 TMPPG
= " {pool} .80" . format ( pool
= REPID
)
985 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= ONEOSD
, pg
= TMPPG
, file = OTHERFILE
)
986 ERRORS
+= test_failure ( cmd
, "Illegal pgid, the seed is larger than current pg_num" )
989 cmd
= ( CFSD_PREFIX
+ "--op import --file {FOO} " ). format ( osd
= ONEOSD
, FOO
= OTHERFILE
)
990 ERRORS
+= test_failure ( cmd
, "file: {FOO} : No such file or directory" . format ( FOO
= OTHERFILE
))
992 cmd
= " {path} /ceph-objectstore-tool --data-path BAD_DATA_PATH --op list" . format ( osd
= ONEOSD
, path
= CEPH_BIN
)
993 ERRORS
+= test_failure ( cmd
, "data-path: BAD_DATA_PATH: No such file or directory" )
995 cmd
= ( CFSD_PREFIX
+ "--journal-path BAD_JOURNAL_PATH --op list" ). format ( osd
= ONEOSD
)
996 ERRORS
+= test_failure ( cmd
, "journal-path: BAD_JOURNAL_PATH: No such file or directory" )
998 cmd
= ( CFSD_PREFIX
+ "--journal-path /bin --op list" ). format ( osd
= ONEOSD
)
999 ERRORS
+= test_failure ( cmd
, "journal-path: /bin: (21) Is a directory" )
1001 # On import can't use stdin from a terminal
1002 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} " ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1003 ERRORS
+= test_failure ( cmd
, "stdin is a tty and no --file filename specified" , tty
= True )
1005 # On import can't use stdin from a terminal
1006 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file -" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1007 ERRORS
+= test_failure ( cmd
, "stdin is a tty and no --file filename specified" , tty
= True )
1009 # Specify a bad --type
1010 os
. mkdir ( OSDDIR
+ "/fakeosd" )
1011 cmd
= ( " {path} /ceph-objectstore-tool --data-path " + OSDDIR
+ "/ {osd} --type foobar --op list --pgid {pg} " ). format ( osd
= "fakeosd" , pg
= ONEPG
, path
= CEPH_BIN
)
1012 ERRORS
+= test_failure ( cmd
, "Unable to create store of type foobar" )
1014 # Don't specify a data-path
1015 cmd
= " {path} /ceph-objectstore-tool --type memstore --op list --pgid {pg} " . format ( dir = OSDDIR
, osd
= ONEOSD
, pg
= ONEPG
, path
= CEPH_BIN
)
1016 ERRORS
+= test_failure ( cmd
, "Must provide --data-path" )
1018 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid 2.0" ). format ( osd
= ONEOSD
)
1019 ERRORS
+= test_failure ( cmd
, "Please use export-remove or you must use --force option" )
1021 cmd
= ( CFSD_PREFIX
+ "--force --op remove" ). format ( osd
= ONEOSD
)
1022 ERRORS
+= test_failure ( cmd
, "Must provide pgid" )
1024 # Don't secify a --op nor object command
1025 cmd
= CFSD_PREFIX
. format ( osd
= ONEOSD
)
1026 ERRORS
+= test_failure ( cmd
, "Must provide --op or object command..." )
1028 # Specify a bad --op command
1029 cmd
= ( CFSD_PREFIX
+ "--op oops" ). format ( osd
= ONEOSD
)
1030 ERRORS
+= test_failure ( cmd
, "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, dump-import)" )
1032 # Provide just the object param not a command
1033 cmd
= ( CFSD_PREFIX
+ "object" ). format ( osd
= ONEOSD
)
1034 ERRORS
+= test_failure ( cmd
, "Invalid syntax, missing command" )
1036 # Provide an object name that doesn't exist
1037 cmd
= ( CFSD_PREFIX
+ "NON_OBJECT get-bytes" ). format ( osd
= ONEOSD
)
1038 ERRORS
+= test_failure ( cmd
, "No object id 'NON_OBJECT' found" )
1040 # Provide an invalid object command
1041 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} '' notacommand" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1042 ERRORS
+= test_failure ( cmd
, "Unknown object command 'notacommand'" )
1044 cmd
= ( CFSD_PREFIX
+ "foo list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1045 ERRORS
+= test_failure ( cmd
, "No object id 'foo' found or invalid JSON specified" )
1047 cmd
= ( CFSD_PREFIX
+ "'{{ \" oid \" : \" obj4 \" , \" key \" : \"\" , \" snapid \" :-1, \" hash \" :2826278768, \" max \" :0, \" pool \" :1, \" namespace \" : \"\" }}' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1048 ERRORS
+= test_failure ( cmd
, "Without --pgid the object '{ \" oid \" : \" obj4 \" , \" key \" : \"\" , \" snapid \" :-1, \" hash \" :2826278768, \" max \" :0, \" pool \" :1, \" namespace \" : \"\" }' must be a JSON array" )
1050 cmd
= ( CFSD_PREFIX
+ "'[]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1051 ERRORS
+= test_failure ( cmd
, "Object '[]' must be a JSON array with 2 elements" )
1053 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.0 \" ]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1054 ERRORS
+= test_failure ( cmd
, "Object '[ \" 1.0 \" ]' must be a JSON array with 2 elements" )
1056 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.0 \" , 5, 8, 9]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1057 ERRORS
+= test_failure ( cmd
, "Object '[ \" 1.0 \" , 5, 8, 9]' must be a JSON array with 2 elements" )
1059 cmd
= ( CFSD_PREFIX
+ "'[1, 2]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1060 ERRORS
+= test_failure ( cmd
, "Object '[1, 2]' must be a JSON array with the first element a string" )
1062 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.3 \" ,{{ \" snapid \" : \" not an int \" }}]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1063 ERRORS
+= test_failure ( cmd
, "Decode object JSON error: value type is 2 not 4" )
1065 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= pid
)
1066 ALLPGS
= OBJREPPGS
+ OBJECPGS
1067 OSDS
= get_osds ( ALLPGS
[ 0 ], OSDDIR
)
1070 print ( "Test all --op dump-journal" )
1071 ALLOSDS
= [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]
1072 ERRORS
+= test_dump_journal ( CFSD_PREFIX
, ALLOSDS
)
1074 # Test --op list and generate json for all objects
1075 print ( "Test --op list variants" )
1077 # retrieve all objects from all PGs
1078 tmpfd
= open ( TMPFILE
, "wb" )
1079 cmd
= ( CFSD_PREFIX
+ "--op list --format json" ). format ( osd
= osd
)
1081 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1083 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1086 lines
= get_lines ( TMPFILE
)
1087 JSONOBJ
= sorted ( set ( lines
))
1088 ( pgid
, coll
, jsondict
) = json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1090 # retrieve all objects in a given PG
1091 tmpfd
= open ( OTHERFILE
, "ab" )
1092 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} --format json" ). format ( osd
= osd
, pg
= pgid
)
1094 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1096 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1099 lines
= get_lines ( OTHERFILE
)
1100 JSONOBJ
= sorted ( set ( lines
))
1101 ( other_pgid
, other_coll
, other_jsondict
) = json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1103 if pgid
!= other_pgid
or jsondict
!= other_jsondict
or coll
!= other_coll
:
1104 logging
. error ( "the first line of --op list is different "
1105 "from the first line of --op list --pgid {pg} " . format ( pg
= pgid
))
1108 # retrieve all objects with a given name in a given PG
1109 tmpfd
= open ( OTHERFILE
, "wb" )
1110 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} {object} --format json" ). format ( osd
= osd
, pg
= pgid
, object = jsondict
[ 'oid' ])
1112 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1114 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1117 lines
= get_lines ( OTHERFILE
)
1118 JSONOBJ
= sorted ( set ( lines
))
1119 ( other_pgid
, other_coll
, other_jsondict
) in json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1121 if pgid
!= other_pgid
or jsondict
!= other_jsondict
or coll
!= other_coll
:
1122 logging
. error ( "the first line of --op list is different "
1123 "from the first line of --op list --pgid {pg} {object} " . format ( pg
= pgid
, object = jsondict
[ 'oid' ]))
1126 print ( "Test --op list by generating json for all objects using default format" )
1128 OSDS
= get_osds ( pg
, OSDDIR
)
1130 tmpfd
= open ( TMPFILE
, "ab" )
1131 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} " ). format ( osd
= osd
, pg
= pg
)
1133 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1135 logging
. error ( "Bad exit status {ret} from --op list request" . format ( ret
= ret
))
1139 lines
= get_lines ( TMPFILE
)
1140 JSONOBJ
= sorted ( set ( lines
))
1141 for JSON
in JSONOBJ
:
1142 ( pgid
, jsondict
) = json
. loads ( JSON
)
1143 # Skip clones for now
1144 if jsondict
[ 'snapid' ] != - 2 :
1146 db
[ jsondict
[ 'namespace' ]][ jsondict
[ 'oid' ]][ 'json' ] = json
. dumps (( pgid
, jsondict
))
1147 # print db[jsondict['namespace']][jsondict['oid']]['json']
1148 if jsondict
[ 'oid' ]. find ( EC_NAME
) == 0 and 'shard_id' not in jsondict
:
1149 logging
. error ( "Malformed JSON {json} " . format ( json
= JSON
))
1153 print ( "Test get-bytes and set-bytes" )
1154 for nspace
in db
. keys ():
1155 for basename
in db
[ nspace
]. keys ():
1156 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1157 JSON
= db
[ nspace
][ basename
][ 'json' ]
1158 GETNAME
= "/tmp/getbytes. {pid} " . format ( pid
= pid
)
1159 TESTNAME
= "/tmp/testbytes. {pid} " . format ( pid
= pid
)
1160 SETNAME
= "/tmp/setbytes. {pid} " . format ( pid
= pid
)
1161 BADNAME
= "/tmp/badbytes. {pid} " . format ( pid
= pid
)
1162 for pg
in OBJREPPGS
:
1163 OSDS
= get_osds ( pg
, OSDDIR
)
1165 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1166 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1167 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1174 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-bytes {fname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, fname
= GETNAME
)
1176 ret
= call ( cmd
, shell
= True )
1178 logging
. error ( "Bad exit status {ret} " . format ( ret
= ret
))
1181 cmd
= "diff -q {file} {getfile} " . format ( file = file , getfile
= GETNAME
)
1182 ret
= call ( cmd
, shell
= True )
1184 logging
. error ( "Data from get-bytes differ" )
1185 logging
. debug ( "Got:" )
1186 cat_file ( logging
. DEBUG
, GETNAME
)
1187 logging
. debug ( "Expected:" )
1188 cat_file ( logging
. DEBUG
, file )
1190 fd
= open ( SETNAME
, "w" )
1191 data
= "put-bytes going into {file} \n " . format ( file = file )
1194 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' set-bytes {sname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, sname
= SETNAME
)
1196 ret
= call ( cmd
, shell
= True )
1198 logging
. error ( "Bad exit status {ret} from set-bytes" . format ( ret
= ret
))
1200 fd
= open ( TESTNAME
, "wb" )
1201 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-bytes -" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1203 ret
= call ( cmd
, shell
= True , stdout
= fd
)
1206 logging
. error ( "Bad exit status {ret} from get-bytes" . format ( ret
= ret
))
1208 cmd
= "diff -q {setfile} {testfile} " . format ( setfile
= SETNAME
, testfile
= TESTNAME
)
1210 ret
= call ( cmd
, shell
= True )
1212 logging
. error ( "Data after set-bytes differ" )
1213 logging
. debug ( "Got:" )
1214 cat_file ( logging
. DEBUG
, TESTNAME
)
1215 logging
. debug ( "Expected:" )
1216 cat_file ( logging
. DEBUG
, SETNAME
)
1219 # Use set-bytes with --dry-run and make sure contents haven't changed
1220 fd
= open ( BADNAME
, "w" )
1221 data
= "Bad data for --dry-run in {file} \n " . format ( file = file )
1224 cmd
= ( CFSD_PREFIX
+ "--dry-run --pgid {pg} ' {json} ' set-bytes {sname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, sname
= BADNAME
)
1226 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1228 logging
. error ( "Bad exit status {ret} from set-bytes --dry-run" . format ( ret
= ret
))
1230 fd
= open ( TESTNAME
, "wb" )
1231 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-bytes -" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1233 ret
= call ( cmd
, shell
= True , stdout
= fd
)
1236 logging
. error ( "Bad exit status {ret} from get-bytes" . format ( ret
= ret
))
1238 cmd
= "diff -q {setfile} {testfile} " . format ( setfile
= SETNAME
, testfile
= TESTNAME
)
1240 ret
= call ( cmd
, shell
= True )
1242 logging
. error ( "Data after set-bytes --dry-run changed!" )
1243 logging
. debug ( "Got:" )
1244 cat_file ( logging
. DEBUG
, TESTNAME
)
1245 logging
. debug ( "Expected:" )
1246 cat_file ( logging
. DEBUG
, SETNAME
)
1249 fd
= open ( file , "rb" )
1250 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' set-bytes" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1252 ret
= call ( cmd
, shell
= True , stdin
= fd
)
1254 logging
. error ( "Bad exit status {ret} from set-bytes to restore object" . format ( ret
= ret
))
1275 # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
1276 print ( "Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap" )
1277 for nspace
in db
. keys ():
1278 for basename
in db
[ nspace
]. keys ():
1279 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1280 JSON
= db
[ nspace
][ basename
][ 'json' ]
1281 for pg
in OBJREPPGS
:
1282 OSDS
= get_osds ( pg
, OSDDIR
)
1284 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1285 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1286 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1289 for key
, val
in db
[ nspace
][ basename
][ "xattr" ]. items ():
1291 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-attr {key} " ). format ( osd
= osd
, json
= JSON
, key
= attrkey
)
1293 getval
= check_output ( cmd
, shell
= True )
1295 logging
. error ( "get-attr of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= attrkey
, get
= getval
, orig
= val
))
1298 # set-attr to bogus value "foobar"
1299 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1301 ret
= call ( cmd
, shell
= True )
1303 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1306 # Test set-attr with dry-run
1307 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1309 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1311 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1314 # Check the set-attr
1315 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1317 getval
= check_output ( cmd
, shell
= True )
1319 logging
. error ( "Bad exit status {ret} from get-attr" . format ( ret
= ret
))
1322 if getval
!= "foobar" :
1323 logging
. error ( "Check of set-attr failed because we got {val} " . format ( val
= getval
))
1327 cmd
= ( CFSD_PREFIX
+ "' {json} ' rm-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1329 ret
= call ( cmd
, shell
= True )
1331 logging
. error ( "Bad exit status {ret} from rm-attr" . format ( ret
= ret
))
1334 # Check rm-attr with dry-run
1335 cmd
= ( CFSD_PREFIX
+ "--dry-run ' {json} ' rm-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1337 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1339 logging
. error ( "Bad exit status {ret} from rm-attr" . format ( ret
= ret
))
1342 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1344 ret
= call ( cmd
, shell
= True , stderr
= nullfd
, stdout
= nullfd
)
1346 logging
. error ( "For rm-attr expect get-attr to fail, but it succeeded" )
1349 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
, val
= val
)
1351 ret
= call ( cmd
, shell
= True )
1353 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1357 hdr
= db
[ nspace
][ basename
]. get ( "omapheader" , "" )
1358 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omaphdr" ). format ( osd
= osd
, json
= JSON
)
1360 gethdr
= check_output ( cmd
, shell
= True )
1362 logging
. error ( "get-omaphdr was wrong: {get} instead of {orig} " . format ( get
= gethdr
, orig
= hdr
))
1365 # set-omaphdr to bogus value "foobar"
1366 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ "' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1368 ret
= call ( cmd
, shell
= True )
1370 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1373 # Check the set-omaphdr
1374 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1376 gethdr
= check_output ( cmd
, shell
= True )
1378 logging
. error ( "Bad exit status {ret} from get-omaphdr" . format ( ret
= ret
))
1381 if gethdr
!= "foobar" :
1382 logging
. error ( "Check of set-omaphdr failed because we got {val} " . format ( val
= getval
))
1385 # Test dry-run with set-omaphdr
1386 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run ' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1388 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1390 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1394 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ "' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
, val
= hdr
)
1396 ret
= call ( cmd
, shell
= True )
1398 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1402 for omapkey
, val
in db
[ nspace
][ basename
][ "omap" ]. items ():
1403 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-omap {key} " ). format ( osd
= osd
, json
= JSON
, key
= omapkey
)
1405 getval
= check_output ( cmd
, shell
= True )
1407 logging
. error ( "get-omap of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= omapkey
, get
= getval
, orig
= val
))
1410 # set-omap to bogus value "foobar"
1411 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1413 ret
= call ( cmd
, shell
= True )
1415 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1418 # Check set-omap with dry-run
1419 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1421 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1423 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1426 # Check the set-omap
1427 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1429 getval
= check_output ( cmd
, shell
= True )
1431 logging
. error ( "Bad exit status {ret} from get-omap" . format ( ret
= ret
))
1434 if getval
!= "foobar" :
1435 logging
. error ( "Check of set-omap failed because we got {val} " . format ( val
= getval
))
1439 cmd
= ( CFSD_PREFIX
+ "' {json} ' rm-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1441 ret
= call ( cmd
, shell
= True )
1443 logging
. error ( "Bad exit status {ret} from rm-omap" . format ( ret
= ret
))
1445 # Check rm-omap with dry-run
1446 cmd
= ( CFSD_PREFIX
+ "--dry-run ' {json} ' rm-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1448 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1450 logging
. error ( "Bad exit status {ret} from rm-omap" . format ( ret
= ret
))
1452 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1454 ret
= call ( cmd
, shell
= True , stderr
= nullfd
, stdout
= nullfd
)
1456 logging
. error ( "For rm-omap expect get-omap to fail, but it succeeded" )
1459 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
, val
= val
)
1461 ret
= call ( cmd
, shell
= True )
1463 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1469 for nspace
in db
. keys ():
1470 for basename
in db
[ nspace
]. keys ():
1471 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1472 JSON
= db
[ nspace
][ basename
][ 'json' ]
1473 GETNAME
= "/tmp/getbytes. {pid} " . format ( pid
= pid
)
1474 for pg
in OBJREPPGS
:
1475 OSDS
= get_osds ( pg
, OSDDIR
)
1477 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1478 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1479 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1482 if int ( basename
. split ( REP_NAME
)[ 1 ]) > int ( NUM_CLONED_REP_OBJECTS
):
1484 cmd
= ( CFSD_PREFIX
+ " ' {json} ' dump | grep ' \" snap \" : 1,' > /dev/null" ). format ( osd
= osd
, json
= JSON
)
1486 ret
= call ( cmd
, shell
= True )
1488 logging
. error ( "Invalid dump for {json} " . format ( json
= JSON
))
1491 print ( "Test list-attrs get-attr" )
1492 ATTRFILE
= r
"/tmp/attrs. {pid} " . format ( pid
= pid
)
1493 VALFILE
= r
"/tmp/val. {pid} " . format ( pid
= pid
)
1494 for nspace
in db
. keys ():
1495 for basename
in db
[ nspace
]. keys ():
1496 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
)
1497 JSON
= db
[ nspace
][ basename
][ 'json' ]
1498 jsondict
= json
. loads ( JSON
)
1500 if 'shard_id' in jsondict
:
1501 logging
. debug ( "ECobject " + JSON
)
1504 OSDS
= get_osds ( pg
, OSDDIR
)
1505 # Fix shard_id since we only have one json instance for each object
1506 jsondict
[ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1507 JSON
= json
. dumps ( jsondict
)
1509 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-attr hinfo_key" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1510 logging
. debug ( "TRY: " + cmd
)
1512 out
= check_output ( cmd
, shell
= True , stderr
= subprocess
. STDOUT
)
1513 logging
. debug ( "FOUND: {json} in {osd} has value ' {val} '" . format ( osd
= osd
, json
= JSON
, val
= out
))
1515 except subprocess
. CalledProcessError
as e
:
1516 if "No such file or directory" not in e
. output
and "No data available" not in e
. output
:
1518 # Assuming k=2 m=1 for the default ec pool
1520 logging
. error ( " {json} hinfo_key found {found} times instead of 3" . format ( json
= JSON
, found
= found
))
1524 # Make sure rep obj with rep pg or ec obj with ec pg
1525 if ( 'shard_id' in jsondict
) != ( pg
. find ( 's' ) > 0 ):
1527 if 'shard_id' in jsondict
:
1528 # Fix shard_id since we only have one json instance for each object
1529 jsondict
[ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1530 JSON
= json
. dumps ( jsondict
)
1531 OSDS
= get_osds ( pg
, OSDDIR
)
1533 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1534 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1535 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1538 afd
= open ( ATTRFILE
, "wb" )
1539 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' list-attrs" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1541 ret
= call ( cmd
, shell
= True , stdout
= afd
)
1544 logging
. error ( "list-attrs failed with {ret} " . format ( ret
= ret
))
1547 keys
= get_lines ( ATTRFILE
)
1548 values
= dict ( db
[ nspace
][ basename
][ "xattr" ])
1550 if key
== "_" or key
== "snapset" or key
== "hinfo_key" :
1552 key
= key
. strip ( "_" )
1553 if key
not in values
:
1554 logging
. error ( "Unexpected key {key} present" . format ( key
= key
))
1557 exp
= values
. pop ( key
)
1558 vfd
= open ( VALFILE
, "wb" )
1559 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= "_" + key
)
1561 ret
= call ( cmd
, shell
= True , stdout
= vfd
)
1564 logging
. error ( "get-attr failed with {ret} " . format ( ret
= ret
))
1567 lines
= get_lines ( VALFILE
)
1570 logging
. error ( "For key {key} got value {got} instead of {expected} " . format ( key
= key
, got
= val
, expected
= exp
))
1572 if len ( values
) != 0 :
1573 logging
. error ( "Not all keys found, remaining keys:" )
1576 print ( "Test --op meta-list" )
1577 tmpfd
= open ( TMPFILE
, "wb" )
1578 cmd
= ( CFSD_PREFIX
+ "--op meta-list" ). format ( osd
= ONEOSD
)
1580 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1582 logging
. error ( "Bad exit status {ret} from --op meta-list request" . format ( ret
= ret
))
1585 print ( "Test get-bytes on meta" )
1587 lines
= get_lines ( TMPFILE
)
1588 JSONOBJ
= sorted ( set ( lines
))
1589 for JSON
in JSONOBJ
:
1590 ( pgid
, jsondict
) = json
. loads ( JSON
)
1592 logging
. error ( "pgid incorrect for --op meta-list {pgid} " . format ( pgid
= pgid
))
1594 if jsondict
[ 'namespace' ] != "" :
1595 logging
. error ( "namespace non null --op meta-list {ns} " . format ( ns
= jsondict
[ 'namespace' ]))
1602 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-bytes {fname} " ). format ( osd
= ONEOSD
, json
= JSON
, fname
= GETNAME
)
1604 ret
= call ( cmd
, shell
= True )
1606 logging
. error ( "Bad exit status {ret} " . format ( ret
= ret
))
1618 print ( "Test pg info" )
1619 for pg
in ALLREPPGS
+ ALLECPGS
:
1620 for osd
in get_osds ( pg
, OSDDIR
):
1621 cmd
= ( CFSD_PREFIX
+ "--op info --pgid {pg} | grep ' \" pgid \" : \" {pg} \" '" ). format ( osd
= osd
, pg
= pg
)
1623 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1625 logging
. error ( "Getting info failed for pg {pg} from {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1628 print ( "Test pg logging" )
1629 if len ( ALLREPPGS
+ ALLECPGS
) == len ( OBJREPPGS
+ OBJECPGS
):
1630 logging
. warning ( "All PGs have objects, so no log without modify entries" )
1631 for pg
in ALLREPPGS
+ ALLECPGS
:
1632 for osd
in get_osds ( pg
, OSDDIR
):
1633 tmpfd
= open ( TMPFILE
, "wb" )
1634 cmd
= ( CFSD_PREFIX
+ "--op log --pgid {pg} " ). format ( osd
= osd
, pg
= pg
)
1636 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1638 logging
. error ( "Getting log failed for pg {pg} from {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1640 HASOBJ
= pg
in OBJREPPGS
+ OBJECPGS
1642 for line
in get_lines ( TMPFILE
):
1643 if line
. find ( "modify" ) != - 1 :
1646 if HASOBJ
!= MODOBJ
:
1647 logging
. error ( "Bad log for pg {pg} from {osd} " . format ( pg
= pg
, osd
= osd
))
1648 MSG
= ( HASOBJ
and [ "" ] or [ "NOT " ])[ 0 ]
1649 print ( "Log should {msg} have a modify entry" . format ( msg
= MSG
))
1657 print ( "Test list-pgs" )
1658 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1660 CHECK_PGS
= get_osd_pgs ( os
. path
. join ( OSDDIR
, osd
), None )
1661 CHECK_PGS
= sorted ( CHECK_PGS
)
1663 cmd
= ( CFSD_PREFIX
+ "--op list-pgs" ). format ( osd
= osd
)
1665 TEST_PGS
= check_output ( cmd
, shell
= True ). split ( " \n " )
1666 TEST_PGS
= sorted ( TEST_PGS
)[ 1 :] # Skip extra blank line
1668 if TEST_PGS
!= CHECK_PGS
:
1669 logging
. error ( "list-pgs got wrong result for osd. {osd} " . format ( osd
= osd
))
1670 logging
. error ( "Expected {pgs} " . format ( pgs
= CHECK_PGS
))
1671 logging
. error ( "Got {pgs} " . format ( pgs
= TEST_PGS
))
1675 print ( "Test pg export --dry-run" )
1677 osd
= get_osds ( pg
, OSDDIR
)[ 0 ]
1678 fname
= "/tmp/fname. {pid} " . format ( pid
= pid
)
1679 cmd
= ( CFSD_PREFIX
+ "--dry-run --op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1681 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1683 logging
. error ( "Exporting --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1685 elif os
. path
. exists ( fname
):
1686 logging
. error ( "Exporting --dry-run created file" )
1689 cmd
= ( CFSD_PREFIX
+ "--dry-run --op export --pgid {pg} > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1691 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1693 logging
. error ( "Exporting --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1696 outdata
= get_lines ( fname
)
1697 if len ( outdata
) > 0 :
1698 logging
. error ( "Exporting --dry-run to stdout not empty" )
1699 logging
. error ( "Data: " + outdata
)
1703 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1704 os
. mkdir ( os
. path
. join ( TESTDIR
, osd
))
1705 print ( "Test pg export" )
1706 for pg
in ALLREPPGS
+ ALLECPGS
:
1707 for osd
in get_osds ( pg
, OSDDIR
):
1708 mydir
= os
. path
. join ( TESTDIR
, osd
)
1709 fname
= os
. path
. join ( mydir
, pg
)
1710 if pg
== ALLREPPGS
[ 0 ]:
1711 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1712 elif pg
== ALLREPPGS
[ 1 ]:
1713 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file - > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1715 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1717 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1719 logging
. error ( "Exporting failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1722 ERRORS
+= EXP_ERRORS
1724 print ( "Test pg removal" )
1726 for pg
in ALLREPPGS
+ ALLECPGS
:
1727 for osd
in get_osds ( pg
, OSDDIR
):
1728 # This should do nothing
1729 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid {pg} --dry-run" ). format ( pg
= pg
, osd
= osd
)
1731 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1733 logging
. error ( "Removing --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1735 cmd
= ( CFSD_PREFIX
+ "--force --op remove --pgid {pg} " ). format ( pg
= pg
, osd
= osd
)
1737 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1739 logging
. error ( "Removing failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1745 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 :
1746 print ( "Test pg import" )
1747 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1748 dir = os
. path
. join ( TESTDIR
, osd
)
1749 PGS
= [ f
for f
in os
. listdir ( dir ) if os
. path
. isfile ( os
. path
. join ( dir , f
))]
1751 file = os
. path
. join ( dir , pg
)
1752 # Make sure this doesn't crash
1753 cmd
= ( CFSD_PREFIX
+ "--op dump-import --file {file} " ). format ( osd
= osd
, file = file )
1755 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1757 logging
. error ( "Dump-import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1759 # This should do nothing
1760 cmd
= ( CFSD_PREFIX
+ "--op import --file {file} --dry-run" ). format ( osd
= osd
, file = file )
1762 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1764 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1767 cmd
= ( "cat {file} |" . format ( file = file ) + CFSD_PREFIX
+ "--op import" ). format ( osd
= osd
)
1769 cmd
= ( CFSD_PREFIX
+ "--op import --file - --pgid {pg} < {file} " ). format ( osd
= osd
, file = file , pg
= pg
)
1771 cmd
= ( CFSD_PREFIX
+ "--op import --file {file} " ). format ( osd
= osd
, file = file )
1773 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1775 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1778 logging
. warning ( "SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES" )
1780 ERRORS
+= IMP_ERRORS
1783 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 and IMP_ERRORS
== 0 :
1784 print ( "Verify replicated import data" )
1785 data_errors
, _
= check_data ( DATADIR
, TMPFILE
, OSDDIR
, REP_NAME
)
1786 ERRORS
+= data_errors
1788 logging
. warning ( "SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES" )
1790 print ( "Test all --op dump-journal again" )
1791 ALLOSDS
= [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]
1792 ERRORS
+= test_dump_journal ( CFSD_PREFIX
, ALLOSDS
)
1797 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 and IMP_ERRORS
== 0 :
1798 print ( "Verify erasure coded import data" )
1799 ERRORS
+= verify ( DATADIR
, EC_POOL
, EC_NAME
, db
)
1800 # Check replicated data/xattr/omap using rados
1801 print ( "Verify replicated import data using rados" )
1802 ERRORS
+= verify ( DATADIR
, REP_POOL
, REP_NAME
, db
)
1805 NEWPOOL
= "rados-import-pool"
1806 cmd
= " {path} /rados mkpool {pool} " . format ( pool
= NEWPOOL
, path
= CEPH_BIN
)
1808 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1810 print ( "Test rados import" )
1812 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1813 dir = os
. path
. join ( TESTDIR
, osd
)
1814 for pg
in [ f
for f
in os
. listdir ( dir ) if os
. path
. isfile ( os
. path
. join ( dir , f
))]:
1815 if pg
. find ( " {id} ." . format ( id = REPID
)) != 0 :
1817 file = os
. path
. join ( dir , pg
)
1820 # This should do nothing
1821 cmd
= " {path} /rados import -p {pool} --dry-run {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1823 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1825 logging
. error ( "Rados import --dry-run failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1827 cmd
= " {path} /rados -p {pool} ls" . format ( pool
= NEWPOOL
, path
= CEPH_BIN
)
1829 data
= check_output ( cmd
, shell
= True )
1831 logging
. error ( "' {data} '" . format ( data
= data
))
1832 logging
. error ( "Found objects after dry-run" )
1834 cmd
= " {path} /rados import -p {pool} {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1836 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1838 logging
. error ( "Rados import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1840 cmd
= " {path} /rados import -p {pool} --no-overwrite {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1842 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1844 logging
. error ( "Rados import --no-overwrite failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1847 ERRORS
+= verify ( DATADIR
, NEWPOOL
, REP_NAME
, db
)
1849 logging
. warning ( "SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES" )
1851 # Clear directories of previous portion
1852 call ( "/bin/rm -rf {dir} " . format ( dir = TESTDIR
), shell
= True )
1853 call ( "/bin/rm -rf {dir} " . format ( dir = DATADIR
), shell
= True )
1857 # Cause SPLIT_POOL to split and test import with object/log filtering
1858 print ( "Testing import all objects after a split" )
1859 SPLIT_POOL
= "split_pool"
1862 SPLIT_NSPACE_COUNT
= 2
1863 SPLIT_NAME
= "split"
1864 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} replicated" . format ( pool
= SPLIT_POOL
, pg
= PG_COUNT
, path
= CEPH_BIN
)
1866 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1867 SPLITID
= get_pool_id ( SPLIT_POOL
, nullfd
)
1868 pool_size
= int ( check_output ( " {path} /ceph osd pool get {pool} size" . format ( pool
= SPLIT_POOL
, path
= CEPH_BIN
), shell
= True , stderr
= nullfd
). split ( " " )[ 1 ])
1873 objects
= range ( 1 , SPLIT_OBJ_COUNT
+ 1 )
1874 nspaces
= range ( SPLIT_NSPACE_COUNT
)
1876 nspace
= get_nspace ( n
)
1879 NAME
= SPLIT_NAME
+ " {num} " . format ( num
= i
)
1880 LNAME
= nspace
+ "-" + NAME
1881 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
1884 cmd
= "rm -f " + DDNAME
1886 call ( cmd
, shell
= True )
1889 dataline
= range ( DATALINECOUNT
)
1892 fd
= open ( DDNAME
, "w" )
1893 data
= "This is the split data for " + LNAME
+ " \n "
1898 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= SPLIT_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
1900 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
1902 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
1908 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1909 os
. mkdir ( os
. path
. join ( TESTDIR
, osd
))
1911 pg
= " {pool} .0" . format ( pool
= SPLITID
)
1914 export_osds
= get_osds ( pg
, OSDDIR
)
1915 for osd
in export_osds
:
1916 mydir
= os
. path
. join ( TESTDIR
, osd
)
1917 fname
= os
. path
. join ( mydir
, pg
)
1918 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1920 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1922 logging
. error ( "Exporting failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1925 ERRORS
+= EXP_ERRORS
1931 cmd
= " {path} /ceph osd pool set {pool} pg_num 2" . format ( pool
= SPLIT_POOL
, path
= CEPH_BIN
)
1933 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1939 # Now 2 PGs, poolid.0 and poolid.1
1940 for seed
in range ( 2 ):
1941 pg
= " {pool} . {seed} " . format ( pool
= SPLITID
, seed
= seed
)
1944 for osd
in get_osds ( pg
, OSDDIR
):
1945 cmd
= ( CFSD_PREFIX
+ "--force --op remove --pgid {pg} " ). format ( pg
= pg
, osd
= osd
)
1947 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1949 # This is weird. The export files are based on only the EXPORT_PG
1950 # and where that pg was before the split. Use 'which' to use all
1951 # export copies in import.
1952 mydir
= os
. path
. join ( TESTDIR
, export_osds
[ which
])
1953 fname
= os
. path
. join ( mydir
, EXPORT_PG
)
1955 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1957 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1959 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1962 ERRORS
+= IMP_ERRORS
1964 # Start up again to make sure imports didn't corrupt anything
1966 print ( "Verify split import data" )
1967 data_errors
, count
= check_data ( DATADIR
, TMPFILE
, OSDDIR
, SPLIT_NAME
)
1968 ERRORS
+= data_errors
1969 if count
!= ( SPLIT_OBJ_COUNT
* SPLIT_NSPACE_COUNT
* pool_size
):
1970 logging
. error ( "Incorrect number of replicas seen {count} " . format ( count
= count
))
1975 call ( "/bin/rm -rf {dir} " . format ( dir = TESTDIR
), shell
= True )
1976 call ( "/bin/rm -rf {dir} " . format ( dir = DATADIR
), shell
= True )
1978 ERRORS
+= test_removeall ( CFSD_PREFIX
, db
, OBJREPPGS
, REP_POOL
, CEPH_BIN
, OSDDIR
, REP_NAME
, NUM_CLONED_REP_OBJECTS
)
1980 # vstart() starts 4 OSDs
1981 ERRORS
+= test_get_set_osdmap ( CFSD_PREFIX
, list ( range ( 4 )), ALLOSDS
)
1982 ERRORS
+= test_get_set_inc_osdmap ( CFSD_PREFIX
, ALLOSDS
[ 0 ])
1985 CORES
= [ f
for f
in os
. listdir ( CEPH_DIR
) if f
. startswith ( "core." )]
1987 CORE_DIR
= os
. path
. join ( "/tmp" , "cores. {pid} " . format ( pid
= os
. getpid ()))
1989 call ( "/bin/mv {ceph_dir} /core.* {core_dir} " . format ( ceph_dir
= CEPH_DIR
, core_dir
= CORE_DIR
), shell
= True )
1990 logging
. error ( "Failure due to cores found" )
1991 logging
. error ( "See {core_dir} for cores" . format ( core_dir
= CORE_DIR
))
1992 ERRORS
+= len ( CORES
)
1995 print ( "TEST PASSED" )
1998 print ( "TEST FAILED WITH {errcount} ERRORS" . format ( errcount
= ERRORS
))
def remove_btrfs_subvolumes(path):
    """Delete any btrfs subvolumes living under *path*.

    A plain ``rm -rf`` cannot remove btrfs subvolumes, so before the test
    directory is wiped we detect whether it sits on btrfs and, if so,
    delete each subvolume with ``btrfs subvolume delete``.

    No-op on FreeBSD (no btrfs there) and on non-btrfs filesystems.

    :param path: directory to scan for btrfs subvolumes
    """
    if platform.system() == "FreeBSD":
        return
    # %%T survives Python %-formatting as a literal %T: stat's
    # "filesystem type in human readable form" format code.
    result = subprocess.Popen("stat -f -c '%%T' %s" % path,
                              shell=True, stdout=subprocess.PIPE)
    # Guard against empty stat output: without the default, `filesystem`
    # would be unbound (NameError) below.
    filesystem = None
    for line in result.stdout:
        filesystem = decode(line).rstrip('\n')
    # Reap the child so it does not linger as a zombie.
    result.wait()
    if filesystem != "btrfs":
        return
    result = subprocess.Popen("sudo btrfs subvolume list %s" % path,
                              shell=True, stdout=subprocess.PIPE)
    for line in result.stdout:
        # Column 9 of `btrfs subvolume list` output is the subvolume path.
        subvolume = decode(line).split()[8]
        # extracting the relative volume name
        m = re.search(".*(%s.*)" % path, subvolume)
        # Only delete subvolumes that actually live under `path`.
        if m:
            found = m.group(1)
            call("sudo btrfs subvolume delete %s" % found, shell=True)
    result.wait()
2019 if __name__
== "__main__" :
2022 status
= main ( sys
. argv
[ 1 :])
2025 os
. chdir ( CEPH_BUILD_DIR
)
2026 remove_btrfs_subvolumes ( CEPH_DIR
)
2027 call ( "/bin/rm -fr {dir} " . format ( dir = CEPH_DIR
), shell
= True )