]>
git.proxmox.com Git - ceph.git/blob - ceph/src/test/ceph_objectstore_tool.py
3 from __future__
import print_function
4 from subprocess
import call
6 from subprocess
import check_output
8 def check_output (* popenargs
, ** kwargs
):
10 # backported from python 2.7 stdlib
11 process
= subprocess
. Popen (
12 stdout
= subprocess
. PIPE
, * popenargs
, ** kwargs
)
13 output
, unused_err
= process
. communicate ()
14 retcode
= process
. poll ()
16 cmd
= kwargs
. get ( "args" )
19 error
= subprocess
. CalledProcessError ( retcode
, cmd
)
37 from subprocess
import DEVNULL
39 DEVNULL
= open ( os
. devnull
, "wb" )
41 logging
. basicConfig ( format
= ' %(levelname)s : %(message)s ' , level
= logging
. WARNING
)
44 if sys
. version_info
[ 0 ] >= 3 :
46 return s
. decode ( 'utf-8' )
def check_output(*args, **kwargs):
    """Python-3 wrapper: run subprocess.check_output and decode its bytes to str."""
    raw = subprocess.check_output(*args, **kwargs)
    return decode(raw)
56 def wait_for_health ():
57 print ( "Wait for health_ok..." , end
= "" )
59 while call ( " {path} /ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null" . format ( path
= CEPH_BIN
), shell
= True ) == 0 :
62 raise Exception ( "Time exceeded to go to health" )
def get_pool_id(name, nullfd):
    """Return the id of the pool *name* as reported by 'ceph osd pool stats'.

    Output looks like: pool {pool} id {id} ... -- so grab the 4th field.
    stderr is redirected to *nullfd* to suppress cluster chatter.
    """
    args = "{path}/ceph osd pool stats {pool}".format(pool=name, path=CEPH_BIN).split()
    stats = check_output(args, stderr=nullfd)
    return stats.split()[3]
73 # return a list of unique PGS given an osd subdirectory
74 def get_osd_pgs ( SUBDIR
, ID
):
77 endhead
= re
. compile ( " {id} .*_head$" . format ( id = ID
))
78 DIR
= os
. path
. join ( SUBDIR
, "current" )
79 PGS
+= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and ( ID
is None or endhead
. match ( f
))]
80 PGS
= [ re
. sub ( "_head" , "" , p
) for p
in PGS
if "_head" in p
]
84 # return a sorted list of unique PGs given a directory
86 OSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
89 SUBDIR
= os
. path
. join ( DIR
, d
)
90 PGS
+= get_osd_pgs ( SUBDIR
, ID
)
91 return sorted ( set ( PGS
))
94 # return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified
95 def get_objs ( ALLPGS
, prefix
, DIR
, ID
):
96 OSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
99 DIRL2
= os
. path
. join ( DIR
, d
)
100 SUBDIR
= os
. path
. join ( DIRL2
, "current" )
103 if not os
. path
. isdir ( os
. path
. join ( SUBDIR
, PGDIR
)):
105 FINALDIR
= os
. path
. join ( SUBDIR
, PGDIR
)
106 # See if there are any objects there
107 if any ( f
for f
in [ val
for _
, _
, fl
in os
. walk ( FINALDIR
) for val
in fl
] if f
. startswith ( prefix
)):
109 return sorted ( set ( PGS
))
112 # return a sorted list of OSDS which have data from a given PG
113 def get_osds ( PG
, DIR
):
114 ALLOSDS
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isdir ( os
. path
. join ( DIR
, f
)) and f
. find ( "osd" ) == 0 ]
117 DIRL2
= os
. path
. join ( DIR
, d
)
118 SUBDIR
= os
. path
. join ( DIRL2
, "current" )
120 if not os
. path
. isdir ( os
. path
. join ( SUBDIR
, PGDIR
)):
126 def get_lines ( filename
):
127 tmpfd
= open ( filename
, "r" )
131 line
= tmpfd
. readline (). rstrip ( ' \n ' )
139 def cat_file ( level
, filename
):
140 if level
< logging
. getLogger (). getEffectiveLevel ():
142 print ( "File: " + filename
)
143 with
open ( filename
, "r" ) as f
:
145 line
= f
. readline (). rstrip ( ' \n ' )
152 def vstart ( new
, opt
= "" ):
153 print ( "vstarting...." , end
= "" )
154 NEW
= new
and "-n" or "-N"
155 call ( "MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 {path} /src/vstart.sh --short -l {new} -d {opt} > /dev/null 2>&1" . format ( new
= NEW
, opt
= opt
, path
= CEPH_ROOT
), shell
= True )
159 def test_failure ( cmd
, errmsg
, tty
= False ):
162 ttyfd
= open ( "/dev/tty" , "rwb" )
163 except Exception as e
:
165 logging
. info ( "SKIP " + cmd
)
167 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
168 tmpfd
= open ( TMPFILE
, "wb" )
172 ret
= call ( cmd
, shell
= True , stdin
= ttyfd
, stdout
= ttyfd
, stderr
= tmpfd
)
175 ret
= call ( cmd
, shell
= True , stderr
= tmpfd
)
179 logging
. error ( "Should have failed, but got exit 0" )
181 lines
= get_lines ( TMPFILE
)
182 matched
= [ l
for l
in lines
if errmsg
in l
]
184 logging
. info ( "Correctly failed with message \" " + matched
[ 0 ] + " \" " )
187 logging
. error ( "Command: " + cmd
)
188 logging
. error ( "Bad messages to stderr \" " + str ( lines
) + " \" " )
189 logging
. error ( "Expected \" " + errmsg
+ " \" " )
196 return "ns {num} " . format ( num
= num
)
199 def verify ( DATADIR
, POOL
, NAME_PREFIX
, db
):
200 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
202 for rawnsfile
in [ f
for f
in os
. listdir ( DATADIR
) if f
. split ( '-' )[ 1 ]. find ( NAME_PREFIX
) == 0 ]:
203 nsfile
= rawnsfile
. split ( "__" )[ 0 ]
204 clone
= rawnsfile
. split ( "__" )[ 1 ]
205 nspace
= nsfile
. split ( "-" )[ 0 ]
206 file = nsfile
. split ( "-" )[ 1 ]
210 path
= os
. path
. join ( DATADIR
, rawnsfile
)
215 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' get {file} {out} " . format ( pool
= POOL
, file = file , out
= TMPFILE
, nspace
= nspace
, path
= CEPH_BIN
)
217 call ( cmd
, shell
= True , stdout
= DEVNULL
, stderr
= DEVNULL
)
218 cmd
= "diff -q {src} {result} " . format ( src
= path
, result
= TMPFILE
)
220 ret
= call ( cmd
, shell
= True )
222 logging
. error ( " {file} data not imported properly" . format ( file = file ))
228 for key
, val
in db
[ nspace
][ file ][ "xattr" ]. items ():
229 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getxattr {name} {key} " . format ( pool
= POOL
, name
= file , key
= key
, nspace
= nspace
, path
= CEPH_BIN
)
231 getval
= check_output ( cmd
, shell
= True , stderr
= DEVNULL
)
232 logging
. debug ( "getxattr {key} {val} " . format ( key
= key
, val
= getval
))
234 logging
. error ( "getxattr of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= key
, get
= getval
, orig
= val
))
237 hdr
= db
[ nspace
][ file ]. get ( "omapheader" , "" )
238 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getomapheader {name} {file} " . format ( pool
= POOL
, name
= file , nspace
= nspace
, file = TMPFILE
, path
= CEPH_BIN
)
240 ret
= call ( cmd
, shell
= True , stderr
= DEVNULL
)
242 logging
. error ( "rados getomapheader returned {ret} " . format ( ret
= ret
))
245 getlines
= get_lines ( TMPFILE
)
246 assert ( len ( getlines
) == 0 or len ( getlines
) == 1 )
247 if len ( getlines
) == 0 :
251 logging
. debug ( "header: {hdr} " . format ( hdr
= gethdr
))
253 logging
. error ( "getomapheader returned wrong val: {get} instead of {orig} " . format ( get
= gethdr
, orig
= hdr
))
255 for key
, val
in db
[ nspace
][ file ][ "omap" ]. items ():
256 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' getomapval {name} {key} {file} " . format ( pool
= POOL
, name
= file , key
= key
, nspace
= nspace
, file = TMPFILE
, path
= CEPH_BIN
)
258 ret
= call ( cmd
, shell
= True , stderr
= DEVNULL
)
260 logging
. error ( "getomapval returned {ret} " . format ( ret
= ret
))
263 getlines
= get_lines ( TMPFILE
)
264 if len ( getlines
) != 1 :
265 logging
. error ( "Bad data from getomapval {lines} " . format ( lines
= getlines
))
269 logging
. debug ( "getomapval {key} {val} " . format ( key
= key
, val
= getval
))
271 logging
. error ( "getomapval returned wrong val: {get} instead of {orig} " . format ( get
= getval
, orig
= val
))
280 def check_journal ( jsondict
):
282 if 'header' not in jsondict
:
283 logging
. error ( "Key 'header' not in dump-journal" )
285 elif 'max_size' not in jsondict
[ 'header' ]:
286 logging
. error ( "Key 'max_size' not in dump-journal header" )
289 print ( " \t Journal max_size = {size} " . format ( size
= jsondict
[ 'header' ][ 'max_size' ]))
290 if 'entries' not in jsondict
:
291 logging
. error ( "Key 'entries' not in dump-journal output" )
293 elif len ( jsondict
[ 'entries' ]) == 0 :
294 logging
. info ( "No entries in journal found" )
296 errors
+= check_journal_entries ( jsondict
[ 'entries' ])
300 def check_journal_entries ( entries
):
302 for enum
in range ( len ( entries
)):
303 if 'offset' not in entries
[ enum
]:
304 logging
. error ( "No 'offset' key in entry {e} " . format ( e
= enum
))
306 if 'seq' not in entries
[ enum
]:
307 logging
. error ( "No 'seq' key in entry {e} " . format ( e
= enum
))
309 if 'transactions' not in entries
[ enum
]:
310 logging
. error ( "No 'transactions' key in entry {e} " . format ( e
= enum
))
312 elif len ( entries
[ enum
][ 'transactions' ]) == 0 :
313 logging
. error ( "No transactions found in entry {e} " . format ( e
= enum
))
316 errors
+= check_entry_transactions ( entries
[ enum
], enum
)
320 def check_entry_transactions ( entry
, enum
):
322 for tnum
in range ( len ( entry
[ 'transactions' ])):
323 if 'trans_num' not in entry
[ 'transactions' ][ tnum
]:
324 logging
. error ( "Key 'trans_num' missing from entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
326 elif entry
[ 'transactions' ][ tnum
][ 'trans_num' ] != tnum
:
327 ft
= entry
[ 'transactions' ][ tnum
][ 'trans_num' ]
328 logging
. error ( "Bad trans_num ( {ft} ) entry {e} trans {t} " . format ( ft
= ft
, e
= enum
, t
= tnum
))
330 if 'ops' not in entry
[ 'transactions' ][ tnum
]:
331 logging
. error ( "Key 'ops' missing from entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
334 errors
+= check_transaction_ops ( entry
[ 'transactions' ][ tnum
][ 'ops' ], enum
, tnum
)
338 def check_transaction_ops ( ops
, enum
, tnum
):
340 logging
. warning ( "No ops found in entry {e} trans {t} " . format ( e
= enum
, t
= tnum
))
342 for onum
in range ( len ( ops
)):
343 if 'op_num' not in ops
[ onum
]:
344 logging
. error ( "Key 'op_num' missing from entry {e} trans {t} op {o} " . format ( e
= enum
, t
= tnum
, o
= onum
))
346 elif ops
[ onum
][ 'op_num' ] != onum
:
347 fo
= ops
[ onum
][ 'op_num' ]
348 logging
. error ( "Bad op_num ( {fo} ) from entry {e} trans {t} op {o} " . format ( fo
= fo
, e
= enum
, t
= tnum
, o
= onum
))
350 if 'op_name' not in ops
[ onum
]:
351 logging
. error ( "Key 'op_name' missing from entry {e} trans {t} op {o} " . format ( e
= enum
, t
= tnum
, o
= onum
))
356 def test_dump_journal ( CFSD_PREFIX
, osds
):
359 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= pid
)
362 # Test --op dump-journal by loading json
363 cmd
= ( CFSD_PREFIX
+ "--op dump-journal --format json" ). format ( osd
= osd
)
365 tmpfd
= open ( TMPFILE
, "wb" )
366 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
368 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
372 tmpfd
= open ( TMPFILE
, "r" )
373 jsondict
= json
. load ( tmpfd
)
377 journal_errors
= check_journal ( jsondict
)
378 if journal_errors
is not 0 :
379 logging
. error ( jsondict
)
380 ERRORS
+= journal_errors
384 CEPH_BUILD_DIR
= os
. environ
. get ( 'CEPH_BUILD_DIR' )
385 CEPH_BIN
= os
. environ
. get ( 'CEPH_BIN' )
386 CEPH_ROOT
= os
. environ
. get ( 'CEPH_ROOT' )
388 if not CEPH_BUILD_DIR
:
389 CEPH_BUILD_DIR
= os
. getcwd ()
390 os
. putenv ( 'CEPH_BUILD_DIR' , CEPH_BUILD_DIR
)
391 CEPH_BIN
= CEPH_BUILD_DIR
392 os
. putenv ( 'CEPH_BIN' , CEPH_BIN
)
393 CEPH_ROOT
= os
. path
. dirname ( CEPH_BUILD_DIR
)
394 os
. putenv ( 'CEPH_ROOT' , CEPH_ROOT
)
395 CEPH_LIB
= os
. path
. join ( CEPH_BIN
, '.libs' )
396 os
. putenv ( 'CEPH_LIB' , CEPH_LIB
)
398 CEPH_DIR
= CEPH_BUILD_DIR
+ "/cot_dir"
399 CEPH_CONF
= os
. path
. join ( CEPH_DIR
, 'ceph.conf' )
402 call ( " {path} /init-ceph -c {conf} stop > /dev/null 2>&1" . format ( conf
= CEPH_CONF
, path
= CEPH_BIN
), shell
= True )
405 def check_data ( DATADIR
, TMPFILE
, OSDDIR
, SPLIT_NAME
):
408 for rawnsfile
in [ f
for f
in os
. listdir ( DATADIR
) if f
. split ( '-' )[ 1 ]. find ( SPLIT_NAME
) == 0 ]:
409 nsfile
= rawnsfile
. split ( "__" )[ 0 ]
410 clone
= rawnsfile
. split ( "__" )[ 1 ]
411 nspace
= nsfile
. split ( "-" )[ 0 ]
412 file = nsfile
. split ( "-" )[ 1 ] + "__" + clone
416 path
= os
. path
. join ( DATADIR
, rawnsfile
)
417 tmpfd
= open ( TMPFILE
, "wb" )
418 cmd
= "find {dir} -name ' {file} _*_ {nspace} _*'" . format ( dir = OSDDIR
, file = file , nspace
= nspace
)
420 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
422 logging
. critical ( "INTERNAL ERROR" )
425 obj_locs
= get_lines ( TMPFILE
)
426 if len ( obj_locs
) == 0 :
427 logging
. error ( "Can't find imported object {name} " . format ( name
= file ))
429 for obj_loc
in obj_locs
:
430 # For btrfs skip snap_* dirs
431 if re
. search ( "/snap_[0-9]*/" , obj_loc
) is not None :
434 cmd
= "diff -q {src} {obj_loc} " . format ( src
= path
, obj_loc
= obj_loc
)
436 ret
= call ( cmd
, shell
= True )
438 logging
. error ( " {file} data not imported properly into {obj} " . format ( file = file , obj
= obj_loc
))
440 return ERRORS
, repcount
443 def set_osd_weight ( CFSD_PREFIX
, osd_ids
, osd_path
, weight
):
444 # change the weight of osd.0 to math.pi in the newest osdmap of given osd
445 osdmap_file
= tempfile
. NamedTemporaryFile ( delete
= True )
446 cmd
= ( CFSD_PREFIX
+ "--op get-osdmap --file {osdmap_file} " ). format ( osd
= osd_path
,
447 osdmap_file
= osdmap_file
. name
)
448 output
= check_output ( cmd
, shell
= True )
449 epoch
= int ( re
. findall ( '#(\d+)' , output
)[ 0 ])
451 new_crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
452 old_crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
453 ret
= call ( " {path} /osdmaptool --export-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
454 crush_file
= old_crush_file
. name
, path
= CEPH_BIN
),
460 for osd_id
in osd_ids
:
461 cmd
= " {path} /crushtool -i {crush_file} --reweight-item osd. {osd} {weight} -o {new_crush_file} " . format ( osd
= osd_id
,
462 crush_file
= old_crush_file
. name
,
464 new_crush_file
= new_crush_file
. name
, path
= CEPH_BIN
)
465 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
467 old_crush_file
, new_crush_file
= new_crush_file
, old_crush_file
469 # change them back, since we don't need to preapre for another round
470 old_crush_file
, new_crush_file
= new_crush_file
, old_crush_file
471 old_crush_file
. close ()
473 ret
= call ( " {path} /osdmaptool --import-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
474 crush_file
= new_crush_file
. name
, path
= CEPH_BIN
),
480 # Minimum test of --dry-run by using it, but not checking anything
481 cmd
= CFSD_PREFIX
+ "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run"
482 cmd
= cmd
. format ( osd
= osd_path
, osdmap_file
= osdmap_file
. name
, epoch
= epoch
)
483 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
486 # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool
487 # to use use a different epoch than the one in osdmap
488 cmd
= CFSD_PREFIX
+ "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force"
489 cmd
= cmd
. format ( osd
= osd_path
, osdmap_file
= osdmap_file
. name
, epoch
= epoch
)
490 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
494 def get_osd_weights ( CFSD_PREFIX
, osd_ids
, osd_path
):
495 osdmap_file
= tempfile
. NamedTemporaryFile ( delete
= True )
496 cmd
= ( CFSD_PREFIX
+ "--op get-osdmap --file {osdmap_file} " ). format ( osd
= osd_path
,
497 osdmap_file
= osdmap_file
. name
)
498 ret
= call ( cmd
, stdout
= DEVNULL
, shell
= True )
501 # we have to read the weights from the crush map, even we can query the weights using
502 # osdmaptool, but please keep in mind, they are different:
503 # item weights in crush map versus weight associated with each osd in osdmap
504 crush_file
= tempfile
. NamedTemporaryFile ( delete
= True )
505 ret
= call ( " {path} /osdmaptool --export-crush {crush_file} {osdmap_file} " . format ( osdmap_file
= osdmap_file
. name
,
506 crush_file
= crush_file
. name
, path
= CEPH_BIN
),
510 output
= check_output ( " {path} /crushtool --tree -i {crush_file} | tail -n {num_osd} " . format ( crush_file
= crush_file
. name
,
511 num_osd
= len ( osd_ids
), path
= CEPH_BIN
),
515 for line
in output
. strip (). split ( ' \n ' ):
516 osd_id
, weight
, osd_name
= re
. split ( '\s+' , line
)
517 weights
. append ( float ( weight
))
522 def test_get_set_osdmap ( CFSD_PREFIX
, osd_ids
, osd_paths
):
523 print ( "Testing get-osdmap and set-osdmap" )
526 weight
= 1 / math
. e
# just some magic number in [0, 1]
528 for osd_path
in osd_paths
:
529 if set_osd_weight ( CFSD_PREFIX
, osd_ids
, osd_path
, weight
):
530 changed
. append ( osd_path
)
532 logging
. warning ( "Failed to change the weights: {0} " . format ( osd_path
))
533 # i am pissed off if none of the store gets changed
537 for osd_path
in changed
:
538 weights
= get_osd_weights ( CFSD_PREFIX
, osd_ids
, osd_path
)
542 if any ( abs ( w
- weight
) > 1e-5 for w
in weights
):
543 logging
. warning ( "Weight is not changed: {0} != {1} " . format ( weights
, weight
))
547 def test_get_set_inc_osdmap ( CFSD_PREFIX
, osd_path
):
548 # incrementals are not used unless we need to build an MOSDMap to update
549 # OSD's peers, so an obvious way to test it is simply overwrite an epoch
550 # with a different copy, and read it back to see if it matches.
552 file_e2
= tempfile
. NamedTemporaryFile ( delete
= True )
553 cmd
= ( CFSD_PREFIX
+ "--op get-inc-osdmap --file {file} " ). format ( osd
= osd_path
,
555 output
= check_output ( cmd
, shell
= True )
556 epoch
= int ( re
. findall ( '#(\d+)' , output
)[ 0 ])
557 # backup e1 incremental before overwriting it
559 file_e1_backup
= tempfile
. NamedTemporaryFile ( delete
= True )
560 cmd
= CFSD_PREFIX
+ "--op get-inc-osdmap --epoch {epoch} --file {file} "
561 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
563 # overwrite e1 with e2
564 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --force --epoch {epoch} --file {file} "
565 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e2
. name
), shell
= True )
567 # Use dry-run to set back to e1 which shouldn't happen
568 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file} "
569 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
572 file_e1_read
= tempfile
. NamedTemporaryFile ( delete
= True )
573 cmd
= CFSD_PREFIX
+ "--op get-inc-osdmap --epoch {epoch} --file {file} "
574 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_read
. name
), shell
= True )
578 if not filecmp
. cmp ( file_e2
. name
, file_e1_read
. name
, shallow
= False ):
579 logging
. error ( "{{get,set}}-inc-osdmap mismatch {0} != {1} " . format ( file_e2
. name
, file_e1_read
. name
))
582 # revert the change with file_e1_backup
583 cmd
= CFSD_PREFIX
+ "--op set-inc-osdmap --epoch {epoch} --file {file} "
584 ret
= call ( cmd
. format ( osd
= osd_path
, epoch
= epoch
, file = file_e1_backup
. name
), shell
= True )
586 logging
. error ( "Failed to revert the changed inc-osdmap" )
592 def test_removeall ( CFSD_PREFIX
, db
, OBJREPPGS
, REP_POOL
, CEPH_BIN
, OSDDIR
, REP_NAME
, NUM_CLONED_REP_OBJECTS
):
594 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= os
. getpid ())
595 nullfd
= open ( os
. devnull
, "w" )
597 print ( "Test removeall" )
599 for nspace
in db
. keys ():
600 for basename
in db
[ nspace
]. keys ():
601 JSON
= db
[ nspace
][ basename
][ 'json' ]
603 OSDS
= get_osds ( pg
, OSDDIR
)
605 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
606 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
607 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
611 if int ( basename
. split ( REP_NAME
)[ 1 ]) <= int ( NUM_CLONED_REP_OBJECTS
):
612 cmd
= ( CFSD_PREFIX
+ "' {json} ' remove" ). format ( osd
= osd
, json
= JSON
)
613 errors
+= test_failure ( cmd
, "Snapshots are present, use removeall to delete everything" )
615 cmd
= ( CFSD_PREFIX
+ " --force --dry-run ' {json} ' remove" ). format ( osd
= osd
, json
= JSON
)
617 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
619 logging
. error ( "remove with --force failed for {json} " . format ( json
= JSON
))
622 cmd
= ( CFSD_PREFIX
+ " --dry-run ' {json} ' removeall" ). format ( osd
= osd
, json
= JSON
)
624 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
626 logging
. error ( "removeall failed for {json} " . format ( json
= JSON
))
629 cmd
= ( CFSD_PREFIX
+ " ' {json} ' removeall" ). format ( osd
= osd
, json
= JSON
)
631 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
633 logging
. error ( "removeall failed for {json} " . format ( json
= JSON
))
636 tmpfd
= open ( TMPFILE
, "w" )
637 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} --namespace {ns} {name} " ). format ( osd
= osd
, pg
= pg
, ns
= nspace
, name
= basename
)
639 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
641 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
644 lines
= get_lines ( TMPFILE
)
646 logging
. error ( "Removeall didn't remove all objects {ns} / {name} : {lines} " . format ( ns
= nspace
, name
= basename
, lines
= lines
))
650 cmd
= " {path} /rados -p {pool} rmsnap snap1" . format ( pool
= REP_POOL
, path
= CEPH_BIN
)
652 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
654 logging
. error ( "rados rmsnap failed" )
662 if sys
. version_info
[ 0 ] < 3 :
663 sys
. stdout
= stdout
= os
. fdopen ( sys
. stdout
. fileno (), 'wb' , 0 )
665 stdout
= sys
. stdout
. buffer
666 if len ( argv
) > 1 and argv
[ 1 ] == "debug" :
671 call ( "rm -fr {dir} ; mkdir {dir} " . format ( dir = CEPH_DIR
), shell
= True )
672 os
. environ
[ "CEPH_DIR" ] = CEPH_DIR
673 OSDDIR
= os
. path
. join ( CEPH_DIR
, "dev" )
674 REP_POOL
= "rep_pool"
675 REP_NAME
= "REPobject"
678 if len ( argv
) > 0 and argv
[ 0 ] == 'large' :
680 NUM_REP_OBJECTS
= 800
681 NUM_CLONED_REP_OBJECTS
= 100
684 # Larger data sets for first object per namespace
685 DATALINECOUNT
= 50000
686 # Number of objects to do xattr/omap testing on
691 NUM_CLONED_REP_OBJECTS
= 2
694 # Larger data sets for first object per namespace
696 # Number of objects to do xattr/omap testing on
700 TESTDIR
= "/tmp/test. {pid} " . format ( pid
= pid
)
701 DATADIR
= "/tmp/data. {pid} " . format ( pid
= pid
)
702 CFSD_PREFIX
= CEPH_BIN
+ "/ceph-objectstore-tool --data-path " + OSDDIR
+ "/ {osd} "
703 PROFNAME
= "testecprofile"
705 os
. environ
[ 'CEPH_CONF' ] = CEPH_CONF
709 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} replicated" . format ( pool
= REP_POOL
, pg
= PG_COUNT
, path
= CEPH_BIN
)
711 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
712 REPID
= get_pool_id ( REP_POOL
, nullfd
)
714 print ( "Created Replicated pool # {repid} " . format ( repid
= REPID
))
716 cmd
= " {path} /ceph osd erasure-code-profile set {prof} crush-failure-domain=osd" . format ( prof
= PROFNAME
, path
= CEPH_BIN
)
718 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
719 cmd
= " {path} /ceph osd erasure-code-profile get {prof} " . format ( prof
= PROFNAME
, path
= CEPH_BIN
)
721 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
722 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} erasure {prof} " . format ( pool
= EC_POOL
, prof
= PROFNAME
, pg
= PG_COUNT
, path
= CEPH_BIN
)
724 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
725 ECID
= get_pool_id ( EC_POOL
, nullfd
)
727 print ( "Created Erasure coded pool # {ecid} " . format ( ecid
= ECID
))
729 print ( "Creating {objs} objects in replicated pool" . format ( objs
=( NUM_REP_OBJECTS
* NUM_NSPACES
)))
730 cmd
= "mkdir -p {datadir} " . format ( datadir
= DATADIR
)
732 call ( cmd
, shell
= True )
736 objects
= range ( 1 , NUM_REP_OBJECTS
+ 1 )
737 nspaces
= range ( NUM_NSPACES
)
739 nspace
= get_nspace ( n
)
744 NAME
= REP_NAME
+ " {num} " . format ( num
= i
)
745 LNAME
= nspace
+ "-" + NAME
746 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
749 cmd
= "rm -f " + DDNAME
751 call ( cmd
, shell
= True )
754 dataline
= range ( DATALINECOUNT
)
757 fd
= open ( DDNAME
, "w" )
758 data
= "This is the replicated data for " + LNAME
+ " \n "
763 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= REP_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
765 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
767 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
770 db
[ nspace
][ NAME
] = {}
772 if i
< ATTR_OBJS
+ 1 :
776 db
[ nspace
][ NAME
][ "xattr" ] = {}
780 mykey
= "key {i} - {k} " . format ( i
= i
, k
= k
)
781 myval
= "val {i} - {k} " . format ( i
= i
, k
= k
)
782 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setxattr {name} {key} {val} " . format ( pool
= REP_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
784 ret
= call ( cmd
, shell
= True )
786 logging
. error ( "setxattr failed with {ret} " . format ( ret
= ret
))
788 db
[ nspace
][ NAME
][ "xattr" ][ mykey
] = myval
790 # Create omap header in all objects but REPobject1
791 if i
< ATTR_OBJS
+ 1 and i
!= 1 :
792 myhdr
= "hdr {i} " . format ( i
= i
)
793 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setomapheader {name} {hdr} " . format ( pool
= REP_POOL
, name
= NAME
, hdr
= myhdr
, nspace
= nspace
, path
= CEPH_BIN
)
795 ret
= call ( cmd
, shell
= True )
797 logging
. critical ( "setomapheader failed with {ret} " . format ( ret
= ret
))
799 db
[ nspace
][ NAME
][ "omapheader" ] = myhdr
801 db
[ nspace
][ NAME
][ "omap" ] = {}
805 mykey
= "okey {i} - {k} " . format ( i
= i
, k
= k
)
806 myval
= "oval {i} - {k} " . format ( i
= i
, k
= k
)
807 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setomapval {name} {key} {val} " . format ( pool
= REP_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
809 ret
= call ( cmd
, shell
= True )
811 logging
. critical ( "setomapval failed with {ret} " . format ( ret
= ret
))
812 db
[ nspace
][ NAME
][ "omap" ][ mykey
] = myval
815 cmd
= " {path} /rados -p {pool} mksnap snap1" . format ( pool
= REP_POOL
, path
= CEPH_BIN
)
817 call ( cmd
, shell
= True )
819 objects
= range ( 1 , NUM_CLONED_REP_OBJECTS
+ 1 )
820 nspaces
= range ( NUM_NSPACES
)
822 nspace
= get_nspace ( n
)
825 NAME
= REP_NAME
+ " {num} " . format ( num
= i
)
826 LNAME
= nspace
+ "-" + NAME
827 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
829 CLONENAME
= DDNAME
+ "__1"
832 cmd
= "mv -f " + DDNAME
+ " " + CLONENAME
834 call ( cmd
, shell
= True )
837 dataline
= range ( DATALINECOUNT
)
840 fd
= open ( DDNAME
, "w" )
841 data
= "This is the replicated data after a snapshot for " + LNAME
+ " \n "
846 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= REP_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
848 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
850 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
853 print ( "Creating {objs} objects in erasure coded pool" . format ( objs
=( NUM_EC_OBJECTS
* NUM_NSPACES
)))
855 objects
= range ( 1 , NUM_EC_OBJECTS
+ 1 )
856 nspaces
= range ( NUM_NSPACES
)
858 nspace
= get_nspace ( n
)
861 NAME
= EC_NAME
+ " {num} " . format ( num
= i
)
862 LNAME
= nspace
+ "-" + NAME
863 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
866 cmd
= "rm -f " + DDNAME
868 call ( cmd
, shell
= True )
871 dataline
= range ( DATALINECOUNT
)
874 fd
= open ( DDNAME
, "w" )
875 data
= "This is the erasure coded data for " + LNAME
+ " \n "
880 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= EC_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
882 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
884 logging
. critical ( "Erasure coded pool creation failed with {ret} " . format ( ret
= ret
))
887 db
[ nspace
][ NAME
] = {}
889 db
[ nspace
][ NAME
][ "xattr" ] = {}
890 if i
< ATTR_OBJS
+ 1 :
897 mykey
= "key {i} - {k} " . format ( i
= i
, k
= k
)
898 myval
= "val {i} - {k} " . format ( i
= i
, k
= k
)
899 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' setxattr {name} {key} {val} " . format ( pool
= EC_POOL
, name
= NAME
, key
= mykey
, val
= myval
, nspace
= nspace
, path
= CEPH_BIN
)
901 ret
= call ( cmd
, shell
= True )
903 logging
. error ( "setxattr failed with {ret} " . format ( ret
= ret
))
905 db
[ nspace
][ NAME
][ "xattr" ][ mykey
] = myval
907 # Omap isn't supported in EC pools
908 db
[ nspace
][ NAME
][ "omap" ] = {}
915 logging
. critical ( "Unable to set up test" )
918 ALLREPPGS
= get_pgs ( OSDDIR
, REPID
)
919 logging
. debug ( ALLREPPGS
)
920 ALLECPGS
= get_pgs ( OSDDIR
, ECID
)
921 logging
. debug ( ALLECPGS
)
923 OBJREPPGS
= get_objs ( ALLREPPGS
, REP_NAME
, OSDDIR
, REPID
)
924 logging
. debug ( OBJREPPGS
)
925 OBJECPGS
= get_objs ( ALLECPGS
, EC_NAME
, OSDDIR
, ECID
)
926 logging
. debug ( OBJECPGS
)
930 osds
= get_osds ( ONEPG
, OSDDIR
)
932 logging
. debug ( ONEOSD
)
934 print ( "Test invalid parameters" )
935 # On export can't use stdout to a terminal
936 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} " ). format ( osd
= ONEOSD
, pg
= ONEPG
)
937 ERRORS
+= test_failure ( cmd
, "stdout is a tty and no --file filename specified" , tty
= True )
939 # On export can't use stdout to a terminal
940 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file -" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
941 ERRORS
+= test_failure ( cmd
, "stdout is a tty and no --file filename specified" , tty
= True )
943 # Prep a valid ec export file for import failure tests
944 ONEECPG
= ALLECPGS
[ 0 ]
945 osds
= get_osds ( ONEECPG
, OSDDIR
)
947 OTHERFILE
= "/tmp/foo. {pid} " . format ( pid
= pid
)
948 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= ONEECOSD
, pg
= ONEECPG
, file = OTHERFILE
)
950 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
952 # On import can't specify a different shard
953 BADPG
= ONEECPG
. split ( 's' )[ 0 ] + "s10"
954 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= ONEECOSD
, pg
= BADPG
, file = OTHERFILE
)
955 ERRORS
+= test_failure ( cmd
, "Can't specify a different shard, must be" )
959 # Prep a valid export file for import failure tests
960 OTHERFILE
= "/tmp/foo. {pid} " . format ( pid
= pid
)
961 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= ONEOSD
, pg
= ONEPG
, file = OTHERFILE
)
963 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
965 # On import can't specify a PG with a non-existent pool
966 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= ONEOSD
, pg
= "10.0" , file = OTHERFILE
)
967 ERRORS
+= test_failure ( cmd
, "Can't specify a different pgid pool, must be" )
969 # On import can't specify shard for a replicated export
970 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} s0 --file {file} " ). format ( osd
= ONEOSD
, pg
= ONEPG
, file = OTHERFILE
)
971 ERRORS
+= test_failure ( cmd
, "Can't specify a sharded pgid with a non-sharded export" )
973 # On import can't specify a PG with a bad seed
974 TMPPG
= " {pool} .80" . format ( pool
= REPID
)
975 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= ONEOSD
, pg
= TMPPG
, file = OTHERFILE
)
976 ERRORS
+= test_failure ( cmd
, "Illegal pgid, the seed is larger than current pg_num" )
979 cmd
= ( CFSD_PREFIX
+ "--op import --file {FOO} " ). format ( osd
= ONEOSD
, FOO
= OTHERFILE
)
980 ERRORS
+= test_failure ( cmd
, "file: {FOO} : No such file or directory" . format ( FOO
= OTHERFILE
))
982 cmd
= " {path} /ceph-objectstore-tool --data-path BAD_DATA_PATH --op list" . format ( osd
= ONEOSD
, path
= CEPH_BIN
)
983 ERRORS
+= test_failure ( cmd
, "data-path: BAD_DATA_PATH: No such file or directory" )
985 cmd
= " {path} /ceph-objectstore-tool --journal-path BAD_JOURNAL_PATH --op dump-journal" . format ( path
= CEPH_BIN
)
986 ERRORS
+= test_failure ( cmd
, "journal-path: BAD_JOURNAL_PATH: (2) No such file or directory" )
988 # On import can't use stdin from a terminal
989 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} " ). format ( osd
= ONEOSD
, pg
= ONEPG
)
990 ERRORS
+= test_failure ( cmd
, "stdin is a tty and no --file filename specified" , tty
= True )
992 # On import can't use stdin from a terminal
993 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file -" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
994 ERRORS
+= test_failure ( cmd
, "stdin is a tty and no --file filename specified" , tty
= True )
996 # Specify a bad --type
997 os
. mkdir ( OSDDIR
+ "/fakeosd" )
998 cmd
= ( " {path} /ceph-objectstore-tool --data-path " + OSDDIR
+ "/ {osd} --type foobar --op list --pgid {pg} " ). format ( osd
= "fakeosd" , pg
= ONEPG
, path
= CEPH_BIN
)
999 ERRORS
+= test_failure ( cmd
, "Unable to create store of type foobar" )
1001 # Don't specify a data-path
1002 cmd
= " {path} /ceph-objectstore-tool --type memstore --op list --pgid {pg} " . format ( dir = OSDDIR
, osd
= ONEOSD
, pg
= ONEPG
, path
= CEPH_BIN
)
1003 ERRORS
+= test_failure ( cmd
, "Must provide --data-path" )
1005 cmd
= ( CFSD_PREFIX
+ "--op remove" ). format ( osd
= ONEOSD
)
1006 ERRORS
+= test_failure ( cmd
, "Must provide pgid" )
1008 # Don't secify a --op nor object command
1009 cmd
= CFSD_PREFIX
. format ( osd
= ONEOSD
)
1010 ERRORS
+= test_failure ( cmd
, "Must provide --op or object command..." )
1012 # Specify a bad --op command
1013 cmd
= ( CFSD_PREFIX
+ "--op oops" ). format ( osd
= ONEOSD
)
1014 ERRORS
+= test_failure ( cmd
, "Must provide --op (info, log, remove, mkfs, fsck, export, import, list, fix-lost, list-pgs, rm-past-intervals, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete)" )
1016 # Provide just the object param not a command
1017 cmd
= ( CFSD_PREFIX
+ "object" ). format ( osd
= ONEOSD
)
1018 ERRORS
+= test_failure ( cmd
, "Invalid syntax, missing command" )
1020 # Provide an object name that doesn't exist
1021 cmd
= ( CFSD_PREFIX
+ "NON_OBJECT get-bytes" ). format ( osd
= ONEOSD
)
1022 ERRORS
+= test_failure ( cmd
, "No object id 'NON_OBJECT' found" )
1024 # Provide an invalid object command
1025 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} '' notacommand" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1026 ERRORS
+= test_failure ( cmd
, "Unknown object command 'notacommand'" )
1028 cmd
= ( CFSD_PREFIX
+ "foo list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1029 ERRORS
+= test_failure ( cmd
, "No object id 'foo' found or invalid JSON specified" )
1031 cmd
= ( CFSD_PREFIX
+ "'{{ \" oid \" : \" obj4 \" , \" key \" : \"\" , \" snapid \" :-1, \" hash \" :2826278768, \" max \" :0, \" pool \" :1, \" namespace \" : \"\" }}' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1032 ERRORS
+= test_failure ( cmd
, "Without --pgid the object '{ \" oid \" : \" obj4 \" , \" key \" : \"\" , \" snapid \" :-1, \" hash \" :2826278768, \" max \" :0, \" pool \" :1, \" namespace \" : \"\" }' must be a JSON array" )
1034 cmd
= ( CFSD_PREFIX
+ "'[]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1035 ERRORS
+= test_failure ( cmd
, "Object '[]' must be a JSON array with 2 elements" )
1037 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.0 \" ]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1038 ERRORS
+= test_failure ( cmd
, "Object '[ \" 1.0 \" ]' must be a JSON array with 2 elements" )
1040 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.0 \" , 5, 8, 9]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1041 ERRORS
+= test_failure ( cmd
, "Object '[ \" 1.0 \" , 5, 8, 9]' must be a JSON array with 2 elements" )
1043 cmd
= ( CFSD_PREFIX
+ "'[1, 2]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1044 ERRORS
+= test_failure ( cmd
, "Object '[1, 2]' must be a JSON array with the first element a string" )
1046 cmd
= ( CFSD_PREFIX
+ "'[ \" 1.3 \" ,{{ \" snapid \" : \" not an int \" }}]' list-omap" ). format ( osd
= ONEOSD
, pg
= ONEPG
)
1047 ERRORS
+= test_failure ( cmd
, "Decode object JSON error: value type is 2 not 4" )
1049 TMPFILE
= r
"/tmp/tmp. {pid} " . format ( pid
= pid
)
1050 ALLPGS
= OBJREPPGS
+ OBJECPGS
1051 OSDS
= get_osds ( ALLPGS
[ 0 ], OSDDIR
)
1054 print ( "Test all --op dump-journal" )
1055 ALLOSDS
= [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]
1056 ERRORS
+= test_dump_journal ( CFSD_PREFIX
, ALLOSDS
)
1058 # Test --op list and generate json for all objects
1059 print ( "Test --op list variants" )
1061 # retrieve all objects from all PGs
1062 tmpfd
= open ( TMPFILE
, "wb" )
1063 cmd
= ( CFSD_PREFIX
+ "--op list --format json" ). format ( osd
= osd
)
1065 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1067 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1070 lines
= get_lines ( TMPFILE
)
1071 JSONOBJ
= sorted ( set ( lines
))
1072 ( pgid
, coll
, jsondict
) = json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1074 # retrieve all objects in a given PG
1075 tmpfd
= open ( OTHERFILE
, "ab" )
1076 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} --format json" ). format ( osd
= osd
, pg
= pgid
)
1078 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1080 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1083 lines
= get_lines ( OTHERFILE
)
1084 JSONOBJ
= sorted ( set ( lines
))
1085 ( other_pgid
, other_coll
, other_jsondict
) = json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1087 if pgid
!= other_pgid
or jsondict
!= other_jsondict
or coll
!= other_coll
:
1088 logging
. error ( "the first line of --op list is different "
1089 "from the first line of --op list --pgid {pg} " . format ( pg
= pgid
))
1092 # retrieve all objects with a given name in a given PG
1093 tmpfd
= open ( OTHERFILE
, "wb" )
1094 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} {object} --format json" ). format ( osd
= osd
, pg
= pgid
, object = jsondict
[ 'oid' ])
1096 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1098 logging
. error ( "Bad exit status {ret} from {cmd} " . format ( ret
= ret
, cmd
= cmd
))
1101 lines
= get_lines ( OTHERFILE
)
1102 JSONOBJ
= sorted ( set ( lines
))
1103 ( other_pgid
, other_coll
, other_jsondict
) in json
. loads ( JSONOBJ
[ 0 ])[ 0 ]
1105 if pgid
!= other_pgid
or jsondict
!= other_jsondict
or coll
!= other_coll
:
1106 logging
. error ( "the first line of --op list is different "
1107 "from the first line of --op list --pgid {pg} {object} " . format ( pg
= pgid
, object = jsondict
[ 'oid' ]))
1110 print ( "Test --op list by generating json for all objects using default format" )
1112 OSDS
= get_osds ( pg
, OSDDIR
)
1114 tmpfd
= open ( TMPFILE
, "ab" )
1115 cmd
= ( CFSD_PREFIX
+ "--op list --pgid {pg} " ). format ( osd
= osd
, pg
= pg
)
1117 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1119 logging
. error ( "Bad exit status {ret} from --op list request" . format ( ret
= ret
))
1123 lines
= get_lines ( TMPFILE
)
1124 JSONOBJ
= sorted ( set ( lines
))
1125 for JSON
in JSONOBJ
:
1126 ( pgid
, jsondict
) = json
. loads ( JSON
)
1127 # Skip clones for now
1128 if jsondict
[ 'snapid' ] != - 2 :
1130 db
[ jsondict
[ 'namespace' ]][ jsondict
[ 'oid' ]][ 'json' ] = json
. dumps (( pgid
, jsondict
))
1131 # print db[jsondict['namespace']][jsondict['oid']]['json']
1132 if jsondict
[ 'oid' ]. find ( EC_NAME
) == 0 and 'shard_id' not in jsondict
:
1133 logging
. error ( "Malformed JSON {json} " . format ( json
= JSON
))
1137 print ( "Test get-bytes and set-bytes" )
1138 for nspace
in db
. keys ():
1139 for basename
in db
[ nspace
]. keys ():
1140 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1141 JSON
= db
[ nspace
][ basename
][ 'json' ]
1142 GETNAME
= "/tmp/getbytes. {pid} " . format ( pid
= pid
)
1143 TESTNAME
= "/tmp/testbytes. {pid} " . format ( pid
= pid
)
1144 SETNAME
= "/tmp/setbytes. {pid} " . format ( pid
= pid
)
1145 BADNAME
= "/tmp/badbytes. {pid} " . format ( pid
= pid
)
1146 for pg
in OBJREPPGS
:
1147 OSDS
= get_osds ( pg
, OSDDIR
)
1149 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1150 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1151 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1158 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-bytes {fname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, fname
= GETNAME
)
1160 ret
= call ( cmd
, shell
= True )
1162 logging
. error ( "Bad exit status {ret} " . format ( ret
= ret
))
1165 cmd
= "diff -q {file} {getfile} " . format ( file = file , getfile
= GETNAME
)
1166 ret
= call ( cmd
, shell
= True )
1168 logging
. error ( "Data from get-bytes differ" )
1169 logging
. debug ( "Got:" )
1170 cat_file ( logging
. DEBUG
, GETNAME
)
1171 logging
. debug ( "Expected:" )
1172 cat_file ( logging
. DEBUG
, file )
1174 fd
= open ( SETNAME
, "w" )
1175 data
= "put-bytes going into {file} \n " . format ( file = file )
1178 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' set-bytes {sname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, sname
= SETNAME
)
1180 ret
= call ( cmd
, shell
= True )
1182 logging
. error ( "Bad exit status {ret} from set-bytes" . format ( ret
= ret
))
1184 fd
= open ( TESTNAME
, "wb" )
1185 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-bytes -" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1187 ret
= call ( cmd
, shell
= True , stdout
= fd
)
1190 logging
. error ( "Bad exit status {ret} from get-bytes" . format ( ret
= ret
))
1192 cmd
= "diff -q {setfile} {testfile} " . format ( setfile
= SETNAME
, testfile
= TESTNAME
)
1194 ret
= call ( cmd
, shell
= True )
1196 logging
. error ( "Data after set-bytes differ" )
1197 logging
. debug ( "Got:" )
1198 cat_file ( logging
. DEBUG
, TESTNAME
)
1199 logging
. debug ( "Expected:" )
1200 cat_file ( logging
. DEBUG
, SETNAME
)
1203 # Use set-bytes with --dry-run and make sure contents haven't changed
1204 fd
= open ( BADNAME
, "w" )
1205 data
= "Bad data for --dry-run in {file} \n " . format ( file = file )
1208 cmd
= ( CFSD_PREFIX
+ "--dry-run --pgid {pg} ' {json} ' set-bytes {sname} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, sname
= BADNAME
)
1210 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1212 logging
. error ( "Bad exit status {ret} from set-bytes --dry-run" . format ( ret
= ret
))
1214 fd
= open ( TESTNAME
, "wb" )
1215 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-bytes -" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1217 ret
= call ( cmd
, shell
= True , stdout
= fd
)
1220 logging
. error ( "Bad exit status {ret} from get-bytes" . format ( ret
= ret
))
1222 cmd
= "diff -q {setfile} {testfile} " . format ( setfile
= SETNAME
, testfile
= TESTNAME
)
1224 ret
= call ( cmd
, shell
= True )
1226 logging
. error ( "Data after set-bytes --dry-run changed!" )
1227 logging
. debug ( "Got:" )
1228 cat_file ( logging
. DEBUG
, TESTNAME
)
1229 logging
. debug ( "Expected:" )
1230 cat_file ( logging
. DEBUG
, SETNAME
)
1233 fd
= open ( file , "rb" )
1234 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' set-bytes" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1236 ret
= call ( cmd
, shell
= True , stdin
= fd
)
1238 logging
. error ( "Bad exit status {ret} from set-bytes to restore object" . format ( ret
= ret
))
1259 # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap
1260 print ( "Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap" )
1261 for nspace
in db
. keys ():
1262 for basename
in db
[ nspace
]. keys ():
1263 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1264 JSON
= db
[ nspace
][ basename
][ 'json' ]
1265 for pg
in OBJREPPGS
:
1266 OSDS
= get_osds ( pg
, OSDDIR
)
1268 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1269 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1270 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1273 for key
, val
in db
[ nspace
][ basename
][ "xattr" ]. items ():
1275 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-attr {key} " ). format ( osd
= osd
, json
= JSON
, key
= attrkey
)
1277 getval
= check_output ( cmd
, shell
= True )
1279 logging
. error ( "get-attr of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= attrkey
, get
= getval
, orig
= val
))
1282 # set-attr to bogus value "foobar"
1283 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1285 ret
= call ( cmd
, shell
= True )
1287 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1290 # Test set-attr with dry-run
1291 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1293 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1295 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1298 # Check the set-attr
1299 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1301 getval
= check_output ( cmd
, shell
= True )
1303 logging
. error ( "Bad exit status {ret} from get-attr" . format ( ret
= ret
))
1306 if getval
!= "foobar" :
1307 logging
. error ( "Check of set-attr failed because we got {val} " . format ( val
= getval
))
1311 cmd
= ( CFSD_PREFIX
+ "' {json} ' rm-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1313 ret
= call ( cmd
, shell
= True )
1315 logging
. error ( "Bad exit status {ret} from rm-attr" . format ( ret
= ret
))
1318 # Check rm-attr with dry-run
1319 cmd
= ( CFSD_PREFIX
+ "--dry-run ' {json} ' rm-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1321 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1323 logging
. error ( "Bad exit status {ret} from rm-attr" . format ( ret
= ret
))
1326 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
)
1328 ret
= call ( cmd
, shell
= True , stderr
= nullfd
, stdout
= nullfd
)
1330 logging
. error ( "For rm-attr expect get-attr to fail, but it succeeded" )
1333 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= attrkey
, val
= val
)
1335 ret
= call ( cmd
, shell
= True )
1337 logging
. error ( "Bad exit status {ret} from set-attr" . format ( ret
= ret
))
1341 hdr
= db
[ nspace
][ basename
]. get ( "omapheader" , "" )
1342 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omaphdr" ). format ( osd
= osd
, json
= JSON
)
1344 gethdr
= check_output ( cmd
, shell
= True )
1346 logging
. error ( "get-omaphdr was wrong: {get} instead of {orig} " . format ( get
= gethdr
, orig
= hdr
))
1349 # set-omaphdr to bogus value "foobar"
1350 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ "' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1352 ret
= call ( cmd
, shell
= True )
1354 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1357 # Check the set-omaphdr
1358 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1360 gethdr
= check_output ( cmd
, shell
= True )
1362 logging
. error ( "Bad exit status {ret} from get-omaphdr" . format ( ret
= ret
))
1365 if gethdr
!= "foobar" :
1366 logging
. error ( "Check of set-omaphdr failed because we got {val} " . format ( val
= getval
))
1369 # Test dry-run with set-omaphdr
1370 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run ' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1372 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1374 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1378 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ "' {json} ' set-omaphdr" ). format ( osd
= osd
, pg
= pg
, json
= JSON
, val
= hdr
)
1380 ret
= call ( cmd
, shell
= True )
1382 logging
. error ( "Bad exit status {ret} from set-omaphdr" . format ( ret
= ret
))
1386 for omapkey
, val
in db
[ nspace
][ basename
][ "omap" ]. items ():
1387 cmd
= ( CFSD_PREFIX
+ " ' {json} ' get-omap {key} " ). format ( osd
= osd
, json
= JSON
, key
= omapkey
)
1389 getval
= check_output ( cmd
, shell
= True )
1391 logging
. error ( "get-omap of key {key} returned wrong val: {get} instead of {orig} " . format ( key
= omapkey
, get
= getval
, orig
= val
))
1394 # set-omap to bogus value "foobar"
1395 cmd
= ( "echo -n foobar | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1397 ret
= call ( cmd
, shell
= True )
1399 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1402 # Check set-omap with dry-run
1403 cmd
= ( "echo -n dryrunbroken | " + CFSD_PREFIX
+ "--dry-run --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1405 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1407 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1410 # Check the set-omap
1411 cmd
= ( CFSD_PREFIX
+ " --pgid {pg} ' {json} ' get-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1413 getval
= check_output ( cmd
, shell
= True )
1415 logging
. error ( "Bad exit status {ret} from get-omap" . format ( ret
= ret
))
1418 if getval
!= "foobar" :
1419 logging
. error ( "Check of set-omap failed because we got {val} " . format ( val
= getval
))
1423 cmd
= ( CFSD_PREFIX
+ "' {json} ' rm-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1425 ret
= call ( cmd
, shell
= True )
1427 logging
. error ( "Bad exit status {ret} from rm-omap" . format ( ret
= ret
))
1429 # Check rm-omap with dry-run
1430 cmd
= ( CFSD_PREFIX
+ "--dry-run ' {json} ' rm-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1432 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1434 logging
. error ( "Bad exit status {ret} from rm-omap" . format ( ret
= ret
))
1436 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
)
1438 ret
= call ( cmd
, shell
= True , stderr
= nullfd
, stdout
= nullfd
)
1440 logging
. error ( "For rm-omap expect get-omap to fail, but it succeeded" )
1443 cmd
= ( "echo -n {val} | " + CFSD_PREFIX
+ " --pgid {pg} ' {json} ' set-omap {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= omapkey
, val
= val
)
1445 ret
= call ( cmd
, shell
= True )
1447 logging
. error ( "Bad exit status {ret} from set-omap" . format ( ret
= ret
))
1453 for nspace
in db
. keys ():
1454 for basename
in db
[ nspace
]. keys ():
1455 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
+ "__head" )
1456 JSON
= db
[ nspace
][ basename
][ 'json' ]
1457 GETNAME
= "/tmp/getbytes. {pid} " . format ( pid
= pid
)
1458 for pg
in OBJREPPGS
:
1459 OSDS
= get_osds ( pg
, OSDDIR
)
1461 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1462 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1463 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1466 if int ( basename
. split ( REP_NAME
)[ 1 ]) > int ( NUM_CLONED_REP_OBJECTS
):
1468 cmd
= ( CFSD_PREFIX
+ " ' {json} ' dump | grep ' \" snap \" : 1,' > /dev/null" ). format ( osd
= osd
, json
= JSON
)
1470 ret
= call ( cmd
, shell
= True )
1472 logging
. error ( "Invalid dump for {json} " . format ( json
= JSON
))
1475 print ( "Test list-attrs get-attr" )
1476 ATTRFILE
= r
"/tmp/attrs. {pid} " . format ( pid
= pid
)
1477 VALFILE
= r
"/tmp/val. {pid} " . format ( pid
= pid
)
1478 for nspace
in db
. keys ():
1479 for basename
in db
[ nspace
]. keys ():
1480 file = os
. path
. join ( DATADIR
, nspace
+ "-" + basename
)
1481 JSON
= db
[ nspace
][ basename
][ 'json' ]
1482 jsondict
= json
. loads ( JSON
)
1484 if 'shard_id' in jsondict
:
1485 logging
. debug ( "ECobject " + JSON
)
1488 OSDS
= get_osds ( pg
, OSDDIR
)
1489 # Fix shard_id since we only have one json instance for each object
1490 jsondict
[ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1491 JSON
= json
. dumps ( jsondict
)
1493 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-attr hinfo_key" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1494 logging
. debug ( "TRY: " + cmd
)
1496 out
= check_output ( cmd
, shell
= True , stderr
= subprocess
. STDOUT
)
1497 logging
. debug ( "FOUND: {json} in {osd} has value ' {val} '" . format ( osd
= osd
, json
= JSON
, val
= out
))
1499 except subprocess
. CalledProcessError
as e
:
1500 if "No such file or directory" not in e
. output
and "No data available" not in e
. output
:
1502 # Assuming k=2 m=1 for the default ec pool
1504 logging
. error ( " {json} hinfo_key found {found} times instead of 3" . format ( json
= JSON
, found
= found
))
1508 # Make sure rep obj with rep pg or ec obj with ec pg
1509 if ( 'shard_id' in jsondict
) != ( pg
. find ( 's' ) > 0 ):
1511 if 'shard_id' in jsondict
:
1512 # Fix shard_id since we only have one json instance for each object
1513 jsondict
[ 'shard_id' ] = int ( pg
. split ( 's' )[ 1 ])
1514 JSON
= json
. dumps ( jsondict
)
1515 OSDS
= get_osds ( pg
, OSDDIR
)
1517 DIR
= os
. path
. join ( OSDDIR
, os
. path
. join ( osd
, os
. path
. join ( "current" , " {pg} _head" . format ( pg
= pg
))))
1518 fnames
= [ f
for f
in os
. listdir ( DIR
) if os
. path
. isfile ( os
. path
. join ( DIR
, f
))
1519 and f
. split ( "_" )[ 0 ] == basename
and f
. split ( "_" )[ 4 ] == nspace
]
1522 afd
= open ( ATTRFILE
, "wb" )
1523 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' list-attrs" ). format ( osd
= osd
, pg
= pg
, json
= JSON
)
1525 ret
= call ( cmd
, shell
= True , stdout
= afd
)
1528 logging
. error ( "list-attrs failed with {ret} " . format ( ret
= ret
))
1531 keys
= get_lines ( ATTRFILE
)
1532 values
= dict ( db
[ nspace
][ basename
][ "xattr" ])
1534 if key
== "_" or key
== "snapset" or key
== "hinfo_key" :
1536 key
= key
. strip ( "_" )
1537 if key
not in values
:
1538 logging
. error ( "Unexpected key {key} present" . format ( key
= key
))
1541 exp
= values
. pop ( key
)
1542 vfd
= open ( VALFILE
, "wb" )
1543 cmd
= ( CFSD_PREFIX
+ "--pgid {pg} ' {json} ' get-attr {key} " ). format ( osd
= osd
, pg
= pg
, json
= JSON
, key
= "_" + key
)
1545 ret
= call ( cmd
, shell
= True , stdout
= vfd
)
1548 logging
. error ( "get-attr failed with {ret} " . format ( ret
= ret
))
1551 lines
= get_lines ( VALFILE
)
1554 logging
. error ( "For key {key} got value {got} instead of {expected} " . format ( key
= key
, got
= val
, expected
= exp
))
1556 if len ( values
) != 0 :
1557 logging
. error ( "Not all keys found, remaining keys:" )
1560 print ( "Test --op meta-list" )
1561 tmpfd
= open ( TMPFILE
, "wb" )
1562 cmd
= ( CFSD_PREFIX
+ "--op meta-list" ). format ( osd
= ONEOSD
)
1564 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1566 logging
. error ( "Bad exit status {ret} from --op meta-list request" . format ( ret
= ret
))
1569 print ( "Test get-bytes on meta" )
1571 lines
= get_lines ( TMPFILE
)
1572 JSONOBJ
= sorted ( set ( lines
))
1573 for JSON
in JSONOBJ
:
1574 ( pgid
, jsondict
) = json
. loads ( JSON
)
1576 logging
. error ( "pgid incorrect for --op meta-list {pgid} " . format ( pgid
= pgid
))
1578 if jsondict
[ 'namespace' ] != "" :
1579 logging
. error ( "namespace non null --op meta-list {ns} " . format ( ns
= jsondict
[ 'namespace' ]))
1586 cmd
= ( CFSD_PREFIX
+ "' {json} ' get-bytes {fname} " ). format ( osd
= ONEOSD
, json
= JSON
, fname
= GETNAME
)
1588 ret
= call ( cmd
, shell
= True )
1590 logging
. error ( "Bad exit status {ret} " . format ( ret
= ret
))
1602 print ( "Test pg info" )
1603 for pg
in ALLREPPGS
+ ALLECPGS
:
1604 for osd
in get_osds ( pg
, OSDDIR
):
1605 cmd
= ( CFSD_PREFIX
+ "--op info --pgid {pg} | grep ' \" pgid \" : \" {pg} \" '" ). format ( osd
= osd
, pg
= pg
)
1607 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1609 logging
. error ( "Getting info failed for pg {pg} from {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1612 print ( "Test pg logging" )
1613 if len ( ALLREPPGS
+ ALLECPGS
) == len ( OBJREPPGS
+ OBJECPGS
):
1614 logging
. warning ( "All PGs have objects, so no log without modify entries" )
1615 for pg
in ALLREPPGS
+ ALLECPGS
:
1616 for osd
in get_osds ( pg
, OSDDIR
):
1617 tmpfd
= open ( TMPFILE
, "wb" )
1618 cmd
= ( CFSD_PREFIX
+ "--op log --pgid {pg} " ). format ( osd
= osd
, pg
= pg
)
1620 ret
= call ( cmd
, shell
= True , stdout
= tmpfd
)
1622 logging
. error ( "Getting log failed for pg {pg} from {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1624 HASOBJ
= pg
in OBJREPPGS
+ OBJECPGS
1626 for line
in get_lines ( TMPFILE
):
1627 if line
. find ( "modify" ) != - 1 :
1630 if HASOBJ
!= MODOBJ
:
1631 logging
. error ( "Bad log for pg {pg} from {osd} " . format ( pg
= pg
, osd
= osd
))
1632 MSG
= ( HASOBJ
and [ "" ] or [ "NOT " ])[ 0 ]
1633 print ( "Log should {msg} have a modify entry" . format ( msg
= MSG
))
1641 print ( "Test list-pgs" )
1642 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1644 CHECK_PGS
= get_osd_pgs ( os
. path
. join ( OSDDIR
, osd
), None )
1645 CHECK_PGS
= sorted ( CHECK_PGS
)
1647 cmd
= ( CFSD_PREFIX
+ "--op list-pgs" ). format ( osd
= osd
)
1649 TEST_PGS
= check_output ( cmd
, shell
= True ). split ( " \n " )
1650 TEST_PGS
= sorted ( TEST_PGS
)[ 1 :] # Skip extra blank line
1652 if TEST_PGS
!= CHECK_PGS
:
1653 logging
. error ( "list-pgs got wrong result for osd. {osd} " . format ( osd
= osd
))
1654 logging
. error ( "Expected {pgs} " . format ( pgs
= CHECK_PGS
))
1655 logging
. error ( "Got {pgs} " . format ( pgs
= TEST_PGS
))
1659 print ( "Test pg export --dry-run" )
1661 osd
= get_osds ( pg
, OSDDIR
)[ 0 ]
1662 fname
= "/tmp/fname. {pid} " . format ( pid
= pid
)
1663 cmd
= ( CFSD_PREFIX
+ "--dry-run --op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1665 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1667 logging
. error ( "Exporting --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1669 elif os
. path
. exists ( fname
):
1670 logging
. error ( "Exporting --dry-run created file" )
1673 cmd
= ( CFSD_PREFIX
+ "--dry-run --op export --pgid {pg} > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1675 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1677 logging
. error ( "Exporting --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1680 outdata
= get_lines ( fname
)
1681 if len ( outdata
) > 0 :
1682 logging
. error ( "Exporting --dry-run to stdout not empty" )
1683 logging
. error ( "Data: " + outdata
)
1687 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1688 os
. mkdir ( os
. path
. join ( TESTDIR
, osd
))
1689 print ( "Test pg export" )
1690 for pg
in ALLREPPGS
+ ALLECPGS
:
1691 for osd
in get_osds ( pg
, OSDDIR
):
1692 mydir
= os
. path
. join ( TESTDIR
, osd
)
1693 fname
= os
. path
. join ( mydir
, pg
)
1694 if pg
== ALLREPPGS
[ 0 ]:
1695 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1696 elif pg
== ALLREPPGS
[ 1 ]:
1697 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file - > {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1699 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1701 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1703 logging
. error ( "Exporting failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1706 ERRORS
+= EXP_ERRORS
1708 print ( "Test pg removal" )
1710 for pg
in ALLREPPGS
+ ALLECPGS
:
1711 for osd
in get_osds ( pg
, OSDDIR
):
1712 # This should do nothing
1713 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid {pg} --dry-run" ). format ( pg
= pg
, osd
= osd
)
1715 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1717 logging
. error ( "Removing --dry-run failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1719 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid {pg} " ). format ( pg
= pg
, osd
= osd
)
1721 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1723 logging
. error ( "Removing failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1729 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 :
1730 print ( "Test pg import" )
1731 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1732 dir = os
. path
. join ( TESTDIR
, osd
)
1733 PGS
= [ f
for f
in os
. listdir ( dir ) if os
. path
. isfile ( os
. path
. join ( dir , f
))]
1735 file = os
. path
. join ( dir , pg
)
1736 # This should do nothing
1737 cmd
= ( CFSD_PREFIX
+ "--op import --file {file} --dry-run" ). format ( osd
= osd
, file = file )
1739 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1741 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1744 cmd
= ( "cat {file} |" . format ( file = file ) + CFSD_PREFIX
+ "--op import" ). format ( osd
= osd
)
1746 cmd
= ( CFSD_PREFIX
+ "--op import --file - --pgid {pg} < {file} " ). format ( osd
= osd
, file = file , pg
= pg
)
1748 cmd
= ( CFSD_PREFIX
+ "--op import --file {file} " ). format ( osd
= osd
, file = file )
1750 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1752 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1755 logging
. warning ( "SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES" )
1757 ERRORS
+= IMP_ERRORS
1760 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 and IMP_ERRORS
== 0 :
1761 print ( "Verify replicated import data" )
1762 data_errors
, _
= check_data ( DATADIR
, TMPFILE
, OSDDIR
, REP_NAME
)
1763 ERRORS
+= data_errors
1765 logging
. warning ( "SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES" )
1767 print ( "Test all --op dump-journal again" )
1768 ALLOSDS
= [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]
1769 ERRORS
+= test_dump_journal ( CFSD_PREFIX
, ALLOSDS
)
1774 if EXP_ERRORS
== 0 and RM_ERRORS
== 0 and IMP_ERRORS
== 0 :
1775 print ( "Verify erasure coded import data" )
1776 ERRORS
+= verify ( DATADIR
, EC_POOL
, EC_NAME
, db
)
1777 # Check replicated data/xattr/omap using rados
1778 print ( "Verify replicated import data using rados" )
1779 ERRORS
+= verify ( DATADIR
, REP_POOL
, REP_NAME
, db
)
1782 NEWPOOL
= "rados-import-pool"
1783 cmd
= " {path} /rados mkpool {pool} " . format ( pool
= NEWPOOL
, path
= CEPH_BIN
)
1785 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1787 print ( "Test rados import" )
1789 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1790 dir = os
. path
. join ( TESTDIR
, osd
)
1791 for pg
in [ f
for f
in os
. listdir ( dir ) if os
. path
. isfile ( os
. path
. join ( dir , f
))]:
1792 if pg
. find ( " {id} ." . format ( id = REPID
)) != 0 :
1794 file = os
. path
. join ( dir , pg
)
1797 # This should do nothing
1798 cmd
= " {path} /rados import -p {pool} --dry-run {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1800 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1802 logging
. error ( "Rados import --dry-run failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1804 cmd
= " {path} /rados -p {pool} ls" . format ( pool
= NEWPOOL
, path
= CEPH_BIN
)
1806 data
= check_output ( cmd
, shell
= True )
1808 logging
. error ( "' {data} '" . format ( data
= data
))
1809 logging
. error ( "Found objects after dry-run" )
1811 cmd
= " {path} /rados import -p {pool} {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1813 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1815 logging
. error ( "Rados import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1817 cmd
= " {path} /rados import -p {pool} --no-overwrite {file} " . format ( pool
= NEWPOOL
, file = file , path
= CEPH_BIN
)
1819 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1821 logging
. error ( "Rados import --no-overwrite failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1824 ERRORS
+= verify ( DATADIR
, NEWPOOL
, REP_NAME
, db
)
1826 logging
. warning ( "SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES" )
1828 # Clear directories of previous portion
1829 call ( "/bin/rm -rf {dir} " . format ( dir = TESTDIR
), shell
= True )
1830 call ( "/bin/rm -rf {dir} " . format ( dir = DATADIR
), shell
= True )
1834 # Cause SPLIT_POOL to split and test import with object/log filtering
1835 print ( "Testing import all objects after a split" )
1836 SPLIT_POOL
= "split_pool"
1839 SPLIT_NSPACE_COUNT
= 2
1840 SPLIT_NAME
= "split"
1841 cmd
= " {path} /ceph osd pool create {pool} {pg} {pg} replicated" . format ( pool
= SPLIT_POOL
, pg
= PG_COUNT
, path
= CEPH_BIN
)
1843 call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1844 SPLITID
= get_pool_id ( SPLIT_POOL
, nullfd
)
1845 pool_size
= int ( check_output ( " {path} /ceph osd pool get {pool} size" . format ( pool
= SPLIT_POOL
, path
= CEPH_BIN
), shell
= True , stderr
= nullfd
). split ( " " )[ 1 ])
1850 objects
= range ( 1 , SPLIT_OBJ_COUNT
+ 1 )
1851 nspaces
= range ( SPLIT_NSPACE_COUNT
)
1853 nspace
= get_nspace ( n
)
1856 NAME
= SPLIT_NAME
+ " {num} " . format ( num
= i
)
1857 LNAME
= nspace
+ "-" + NAME
1858 DDNAME
= os
. path
. join ( DATADIR
, LNAME
)
1861 cmd
= "rm -f " + DDNAME
1863 call ( cmd
, shell
= True )
1866 dataline
= range ( DATALINECOUNT
)
1869 fd
= open ( DDNAME
, "w" )
1870 data
= "This is the split data for " + LNAME
+ " \n "
1875 cmd
= " {path} /rados -p {pool} -N ' {nspace} ' put {name} {ddname} " . format ( pool
= SPLIT_POOL
, name
= NAME
, ddname
= DDNAME
, nspace
= nspace
, path
= CEPH_BIN
)
1877 ret
= call ( cmd
, shell
= True , stderr
= nullfd
)
1879 logging
. critical ( "Rados put command failed with {ret} " . format ( ret
= ret
))
1885 for osd
in [ f
for f
in os
. listdir ( OSDDIR
) if os
. path
. isdir ( os
. path
. join ( OSDDIR
, f
)) and f
. find ( "osd" ) == 0 ]:
1886 os
. mkdir ( os
. path
. join ( TESTDIR
, osd
))
1888 pg
= " {pool} .0" . format ( pool
= SPLITID
)
1891 export_osds
= get_osds ( pg
, OSDDIR
)
1892 for osd
in export_osds
:
1893 mydir
= os
. path
. join ( TESTDIR
, osd
)
1894 fname
= os
. path
. join ( mydir
, pg
)
1895 cmd
= ( CFSD_PREFIX
+ "--op export --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1897 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1899 logging
. error ( "Exporting failed for pg {pg} on {osd} with {ret} " . format ( pg
= pg
, osd
= osd
, ret
= ret
))
1902 ERRORS
+= EXP_ERRORS
1908 cmd
= " {path} /ceph osd pool set {pool} pg_num 2" . format ( pool
= SPLIT_POOL
, path
= CEPH_BIN
)
1910 ret
= call ( cmd
, shell
= True , stdout
= nullfd
, stderr
= nullfd
)
1916 # Now 2 PGs, poolid.0 and poolid.1
1917 for seed
in range ( 2 ):
1918 pg
= " {pool} . {seed} " . format ( pool
= SPLITID
, seed
= seed
)
1921 for osd
in get_osds ( pg
, OSDDIR
):
1922 cmd
= ( CFSD_PREFIX
+ "--op remove --pgid {pg} " ). format ( pg
= pg
, osd
= osd
)
1924 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1926 # This is weird. The export files are based on only the EXPORT_PG
1927 # and where that pg was before the split. Use 'which' to use all
1928 # export copies in import.
1929 mydir
= os
. path
. join ( TESTDIR
, export_osds
[ which
])
1930 fname
= os
. path
. join ( mydir
, EXPORT_PG
)
1932 cmd
= ( CFSD_PREFIX
+ "--op import --pgid {pg} --file {file} " ). format ( osd
= osd
, pg
= pg
, file = fname
)
1934 ret
= call ( cmd
, shell
= True , stdout
= nullfd
)
1936 logging
. error ( "Import failed from {file} with {ret} " . format ( file = file , ret
= ret
))
1939 ERRORS
+= IMP_ERRORS
1941 # Start up again to make sure imports didn't corrupt anything
1943 print ( "Verify split import data" )
1944 data_errors
, count
= check_data ( DATADIR
, TMPFILE
, OSDDIR
, SPLIT_NAME
)
1945 ERRORS
+= data_errors
1946 if count
!= ( SPLIT_OBJ_COUNT
* SPLIT_NSPACE_COUNT
* pool_size
):
1947 logging
. error ( "Incorrect number of replicas seen {count} " . format ( count
= count
))
1952 call ( "/bin/rm -rf {dir} " . format ( dir = TESTDIR
), shell
= True )
1953 call ( "/bin/rm -rf {dir} " . format ( dir = DATADIR
), shell
= True )
1955 ERRORS
+= test_removeall ( CFSD_PREFIX
, db
, OBJREPPGS
, REP_POOL
, CEPH_BIN
, OSDDIR
, REP_NAME
, NUM_CLONED_REP_OBJECTS
)
1957 # vstart() starts 4 OSDs
1958 ERRORS
+= test_get_set_osdmap ( CFSD_PREFIX
, list ( range ( 4 )), ALLOSDS
)
1959 ERRORS
+= test_get_set_inc_osdmap ( CFSD_PREFIX
, ALLOSDS
[ 0 ])
1961 print ( "TEST PASSED" )
1964 print ( "TEST FAILED WITH {errcount} ERRORS" . format ( errcount
= ERRORS
))
1968 def remove_btrfs_subvolumes ( path
):
1969 if platform
. system () == "FreeBSD" :
1971 result
= subprocess
. Popen ( "stat -f -c ' %% T' %s " % path
, shell
= True , stdout
= subprocess
. PIPE
)
1972 for line
in result
. stdout
:
1973 filesystem
= decode ( line
). rstrip ( ' \n ' )
1974 if filesystem
== "btrfs" :
1975 result
= subprocess
. Popen ( "sudo btrfs subvolume list %s " % path
, shell
= True , stdout
= subprocess
. PIPE
)
1976 for line
in result
. stdout
:
1977 subvolume
= decode ( line
). split ()[ 8 ]
1978 # extracting the relative volume name
1979 m
= re
. search ( ".*( %s .*)" % path
, subvolume
)
1982 call ( "sudo btrfs subvolume delete %s " % found
, shell
= True )
# NOTE(review): script entry point, as captured by the HTML scrape.
# Source lines 1986-1987 and 1989-1990 are MISSING (gaps in the embedded
# numbering); they most likely wrapped the main() call in try/finally and
# daemon teardown — TODO confirm against the upstream file. The visible
# intent: run main(), then remove any btrfs subvolumes under CEPH_DIR and
# delete the test directory tree.
1985 if __name__
== "__main__" :
1988 status
= main ( sys
. argv
[ 1 :])
# Cleanup: btrfs subvolumes must be deleted before `rm -fr` can succeed.
1991 remove_btrfs_subvolumes ( CEPH_DIR
)
1992 call ( "/bin/rm -fr {dir} " . format ( dir = CEPH_DIR
), shell
= True )