]>
git.proxmox.com Git - ceph.git/blob - ceph/src/seastar/dpdk/examples/ip_pipeline/config/pipeline-to-core-mapping.py
5 # Copyright(c) 2016 Intel Corporation. All rights reserved.
8 # Redistribution and use in source and binary forms, with or without
9 # modification, are permitted provided that the following conditions
12 # * Redistributions of source code must retain the above copyright
13 # notice, this list of conditions and the following disclaimer.
14 # * Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in
16 # the documentation and/or other materials provided with the
18 # * Neither the name of Intel Corporation nor the names of its
19 # contributors may be used to endorse or promote products derived
20 # from this software without specific prior written permission.
22 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 # This script maps the set of pipelines identified (MASTER pipelines are
36 # ignored) from the input configuration file to the set of cores
37 # provided as input argument and creates configuration files for each of
38 # the mapping combinations.
41 from __future__
import print_function
42 from collections
import namedtuple
# Trace/file-output switches: 1 = enabled, 0 = disabled.
# The stage N trace flags control console logging of each mapping stage.
enable_stage0_traceout = 1
enable_stage1_traceout = 1
enable_stage2_traceout = 1

# The stage N file flags control generation of the output config files.
enable_stage1_fileout = 1
enable_stage2_fileout = 1
# Global limits of the mapping algorithm, kept in an immutable record.
Constants = namedtuple('Constants', ['MAX_CORES', 'MAX_PIPELINES'])
constants = Constants(MAX_CORES=16, MAX_PIPELINES=64)
# Pattern for a physical core name, e.g. "s0c1": a single-digit socket id
# followed by a non-zero core id. Raw string literal avoids the invalid
# "\d" escape warning a plain string would raise on Python 3.6+.
pattern_phycore = r'^(s|S)\d(c|C)[1-9][0-9]*$'
reg_phycore = re.compile(pattern_phycore)
def popcount(mask):
    """Return the number of set bits in the integer *mask*."""
    return bin(mask).count("1")
def len2mask(length):
    """Return a bitmask with the *length* least-significant bits set.

    Returns 0 for a zero length; exits the program for lengths above 64,
    the widest mask this tool supports.
    """
    if length == 0:
        return 0
    if length > 64:
        sys.exit('error: len2mask - length %i > 64. exiting' % length)
    return int('1' * length, 2)
81 def bitstring_write(n
, n_bits
):
119 self
.bitpos
= array
.array(
120 "L", itertools
.repeat(0, constants
.MAX_PIPELINES
))
126 self
.cores
= [Cores0() for i
in range(0, constants
.MAX_CORES
)]
129 self
.n_pipelines0
= 0
131 self
.file_comment
= ""
135 def stage0_print(self
):
136 print('printing Context0 obj')
137 print('c0.cores(n_pipelines) = [ ', end
='')
138 for cores_count
in range(0, constants
.MAX_CORES
):
139 print(self
.cores
[cores_count
].n_pipelines
, end
=' ')
141 print('c0.n_cores = %d' % self
.n_cores
)
142 print('c0.n_pipelines = %d' % self
.n_pipelines
)
143 print('c0.n_pipelines0 = %d' % self
.n_pipelines0
)
144 print('c0.pos = %d' % self
.pos
)
145 print('c0.file_comment = %s' % self
.file_comment
)
146 if (self
.ctx1
is not None):
147 print('c0.ctx1 = ', end
='')
148 print(repr(self
.ctx1
))
150 print('c0.ctx1 = None')
152 if (self
.ctx2
is not None):
153 print('c0.ctx2 = ', end
='')
154 print(repr(self
.ctx2
))
156 print('c0.ctx2 = None')
158 def stage0_init(self
, num_cores
, num_pipelines
, ctx1
, ctx2
):
159 self
.n_cores
= num_cores
160 self
.n_pipelines
= num_pipelines
164 def stage0_process(self
):
166 self
.cores
[0].n_pipelines
= self
.n_pipelines
167 self
.n_pipelines0
= 0
173 if ((self
.pos
< self
.n_cores
) and (self
.n_pipelines0
> 0)):
174 self
.cores
[self
.pos
].n_pipelines
= min(
175 self
.cores
[self
.pos
- 1].n_pipelines
,
177 self
.n_pipelines0
-= self
.cores
[self
.pos
].n_pipelines
183 if (self
.n_pipelines0
== 0):
185 self
.ctx1
.stage1_init(self
, self
.ctx2
) # self is object c0
186 self
.ctx1
.stage1_process()
194 if ((self
.cores
[self
.pos
].n_pipelines
> 1) and
195 (self
.pos
!= (self
.n_cores
- 1))):
198 self
.n_pipelines0
+= self
.cores
[self
.pos
].n_pipelines
199 self
.cores
[self
.pos
].n_pipelines
= 0
202 self
.cores
[self
.pos
].n_pipelines
-= 1
203 self
.n_pipelines0
+= 1
206 def stage0_log(self
):
207 tmp_file_comment
= ""
208 if(enable_stage0_traceout
!= 1):
211 print('STAGE0: ', end
='')
212 tmp_file_comment
+= 'STAGE0: '
213 for cores_count
in range(0, self
.n_cores
):
216 self
.cores
[cores_count
].n_pipelines
), end
='')
217 tmp_file_comment
+= "C{} = {}\t".format(
218 cores_count
, self
.cores
[cores_count
].n_pipelines
)
221 self
.ctx1
.stage0_file_comment
= tmp_file_comment
222 self
.ctx2
.stage0_file_comment
= tmp_file_comment
229 self
.cores
= [Cores1() for i
in range(constants
.MAX_CORES
)]
233 self
.stage0_file_comment
= ""
234 self
.stage1_file_comment
= ""
237 self
.arr_pipelines2cores
= []
239 def stage1_reset(self
):
240 for i
in range(constants
.MAX_CORES
):
241 self
.cores
[i
].pipelines
= 0
242 self
.cores
[i
].n_pipelines
= 0
249 del self
.arr_pipelines2cores
[:]
251 def stage1_print(self
):
252 print('printing Context1 obj')
253 print('ctx1.cores(pipelines,n_pipelines) = [ ', end
='')
254 for cores_count
in range(0, constants
.MAX_CORES
):
255 print('(%d,%d)' % (self
.cores
[cores_count
].pipelines
,
256 self
.cores
[cores_count
].n_pipelines
), end
=' ')
258 print('ctx1.n_cores = %d' % self
.n_cores
)
259 print('ctx1.n_pipelines = %d' % self
.n_pipelines
)
260 print('ctx1.pos = %d' % self
.pos
)
261 print('ctx1.stage0_file_comment = %s' % self
.stage0_file_comment
)
262 print('ctx1.stage1_file_comment = %s' % self
.stage1_file_comment
)
263 if (self
.ctx2
is not None):
264 print('ctx1.ctx2 = ', end
='')
267 print('ctx1.ctx2 = None')
269 def stage1_init(self
, c0
, ctx2
):
272 while (c0
.cores
[self
.n_cores
].n_pipelines
> 0):
275 self
.n_pipelines
= c0
.n_pipelines
278 self
.arr_pipelines2cores
= [0] * self
.n_pipelines
281 while (i
< self
.n_cores
):
282 self
.cores
[i
].n_pipelines
= c0
.cores
[i
].n_pipelines
285 def stage1_process(self
):
286 pipelines_max
= len2mask(self
.n_pipelines
)
291 if (self
.cores
[self
.pos
].pipelines
== pipelines_max
):
295 self
.cores
[self
.pos
].pipelines
= 0
299 self
.cores
[self
.pos
].pipelines
+= 1
300 if (popcount(self
.cores
[self
.pos
].pipelines
) !=
301 self
.cores
[self
.pos
].n_pipelines
):
306 while (pos
< self
.pos
):
307 if ((self
.cores
[self
.pos
].pipelines
) &
308 (self
.cores
[pos
].pipelines
)):
316 if ((self
.pos
> 0) and
317 ((self
.cores
[self
.pos
].n_pipelines
) ==
318 (self
.cores
[self
.pos
- 1].n_pipelines
)) and
319 ((self
.cores
[self
.pos
].pipelines
) <
320 (self
.cores
[self
.pos
- 1].pipelines
))):
323 if (self
.pos
== self
.n_cores
- 1):
325 self
.ctx2
.stage2_init(self
)
326 self
.ctx2
.stage2_process()
331 self
.cores
[self
.pos
].pipelines
= 0
337 def stage1_log(self
):
338 tmp_file_comment
= ""
339 if(enable_stage1_traceout
== 1):
340 print('STAGE1: ', end
='')
341 tmp_file_comment
+= 'STAGE1: '
343 while (i
< self
.n_cores
):
344 print('C%d = [' % i
, end
='')
345 tmp_file_comment
+= "C{} = [".format(i
)
347 j
= self
.n_pipelines
- 1
349 cond
= ((self
.cores
[i
].pipelines
) & (1 << j
))
352 tmp_file_comment
+= '1'
355 tmp_file_comment
+= '0'
359 tmp_file_comment
+= ']\t'
363 self
.stage1_file_comment
= tmp_file_comment
364 self
.ctx2
.stage1_file_comment
= tmp_file_comment
366 # check if file traceing is enabled
367 if(enable_stage1_fileout
!= 1):
370 # spit out the combination to file
371 self
.stage1_process_file()
373 def stage1_updateCoresInBuf(self
, nPipeline
, sCore
):
374 rePipeline
= self
._fileTrace
.arr_pipelines
[nPipeline
]
375 rePipeline
= rePipeline
.replace("[", "\[").replace("]", "\]")
376 reCore
= 'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
377 sSubs
= 'core = ' + sCore
+ '\n'
379 reg_pipeline
= re
.compile(rePipeline
)
380 search_match
= reg_pipeline
.search(self
._fileTrace
.in_buf
)
383 pos
= search_match
.start()
384 substr1
= self
._fileTrace
.in_buf
[:pos
]
385 substr2
= self
._fileTrace
.in_buf
[pos
:]
386 substr2
= re
.sub(reCore
, sSubs
, substr2
, 1)
387 self
._fileTrace
.in_buf
= substr1
+ substr2
389 def stage1_process_file(self
):
390 outFileName
= os
.path
.join(self
._fileTrace
.out_path
,
391 self
._fileTrace
.prefix_outfile
)
392 outFileName
+= "_{}CoReS".format(self
.n_cores
)
394 i
= 0 # represents core number
395 while (i
< self
.n_cores
):
396 j
= self
.n_pipelines
- 1
399 cond
= ((self
.cores
[i
].pipelines
) & (1 << j
))
401 # update the pipelines array to match the core
402 # only in case of cond match
403 self
.arr_pipelines2cores
[
404 pipeline_idx
] = fileTrace
.in_physical_cores
[i
]
411 # update the in_buf as per the arr_pipelines2cores
412 for pipeline_idx
in range(len(self
.arr_pipelines2cores
)):
413 outFileName
+= "_{}".format(self
.arr_pipelines2cores
[pipeline_idx
])
414 self
.stage1_updateCoresInBuf(
415 pipeline_idx
, self
.arr_pipelines2cores
[pipeline_idx
])
417 # by now the in_buf is all set to be written to file
418 outFileName
+= self
._fileTrace
.suffix_outfile
419 outputFile
= open(outFileName
, "w")
421 # write out the comments
422 strTruncated
= ("", "(Truncated)")[self
._fileTrace
.ncores_truncated
]
424 "; =============== Pipeline-to-Core Mapping ================\n"
425 "; Generated from file {}\n"
426 "; Input pipelines = {}\n"
427 "; Input cores = {}\n"
428 "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {}\n"
430 self
._fileTrace
.in_file_namepath
,
431 fileTrace
.arr_pipelines
,
432 fileTrace
.in_physical_cores
,
433 self
._fileTrace
.n_pipelines
,
434 self
._fileTrace
.n_cores
,
436 self
._fileTrace
.hyper_thread
))
441 "; ========================================================\n"
444 stg0cmt
=self
.stage0_file_comment
,
445 stg1cmt
=self
.stage1_file_comment
))
447 # write buffer contents
448 outputFile
.write(self
._fileTrace
.in_buf
)
457 self
.cores
= [Cores2() for i
in range(constants
.MAX_CORES
)]
461 self
.stage0_file_comment
= ""
462 self
.stage1_file_comment
= ""
463 self
.stage2_file_comment
= ""
465 # each array entry is a pipeline mapped to core stored as string
466 # pipeline ranging from 1 to n, however stored in zero based array
467 self
.arr2_pipelines2cores
= []
469 def stage2_print(self
):
470 print('printing Context2 obj')
471 print('ctx2.cores(pipelines, n_pipelines, counter, counter_max) =')
472 for cores_count
in range(0, constants
.MAX_CORES
):
473 print('core[%d] = (%d,%d,%d,%d)' % (
475 self
.cores
[cores_count
].pipelines
,
476 self
.cores
[cores_count
].n_pipelines
,
477 self
.cores
[cores_count
].counter
,
478 self
.cores
[cores_count
].counter_max
))
480 print('ctx2.n_cores = %d' % self
.n_cores
, end
='')
481 print('ctx2.n_pipelines = %d' % self
.n_pipelines
, end
='')
482 print('ctx2.pos = %d' % self
.pos
)
483 print('ctx2.stage0_file_comment = %s' %
484 self
.self
.stage0_file_comment
)
485 print('ctx2.stage1_file_comment = %s' %
486 self
.self
.stage1_file_comment
)
487 print('ctx2.stage2_file_comment = %s' %
488 self
.self
.stage2_file_comment
)
490 def stage2_reset(self
):
491 for i
in range(0, constants
.MAX_CORES
):
492 self
.cores
[i
].pipelines
= 0
493 self
.cores
[i
].n_pipelines
= 0
494 self
.cores
[i
].counter
= 0
495 self
.cores
[i
].counter_max
= 0
497 for idx
in range(0, constants
.MAX_PIPELINES
):
498 self
.cores
[i
].bitpos
[idx
] = 0
504 del self
.arr2_pipelines2cores
[:]
506 def bitpos_load(self
, coreidx
):
508 while (i
< self
.n_pipelines
):
509 if ((self
.cores
[coreidx
].pipelines
) &
511 self
.cores
[coreidx
].bitpos
[j
] = i
514 self
.cores
[coreidx
].n_pipelines
= j
516 def bitpos_apply(self
, in_buf
, pos
, n_pos
):
518 for i
in range(0, n_pos
):
519 out |
= (in_buf
& (1 << i
)) << (pos
[i
] - i
)
523 def stage2_init(self
, ctx1
):
525 self
.n_cores
= ctx1
.n_cores
526 self
.n_pipelines
= ctx1
.n_pipelines
528 self
.arr2_pipelines2cores
= [''] * self
.n_pipelines
531 while (core_idx
< self
.n_cores
):
532 self
.cores
[core_idx
].pipelines
= ctx1
.cores
[core_idx
].pipelines
534 self
.bitpos_load(core_idx
)
537 def stage2_log(self
):
538 tmp_file_comment
= ""
539 if(enable_stage2_traceout
== 1):
540 print('STAGE2: ', end
='')
541 tmp_file_comment
+= 'STAGE2: '
543 for i
in range(0, self
.n_cores
):
544 mask
= len2mask(self
.cores
[i
].n_pipelines
)
545 pipelines_ht0
= self
.bitpos_apply(
546 (~self
.cores
[i
].counter
) & mask
,
547 self
.cores
[i
].bitpos
,
548 self
.cores
[i
].n_pipelines
)
550 pipelines_ht1
= self
.bitpos_apply(
551 self
.cores
[i
].counter
,
552 self
.cores
[i
].bitpos
,
553 self
.cores
[i
].n_pipelines
)
555 print('C%dHT0 = [' % i
, end
='')
556 tmp_file_comment
+= "C{}HT0 = [".format(i
)
557 tmp_file_comment
+= bitstring_write(
558 pipelines_ht0
, self
.n_pipelines
)
560 print(']\tC%dHT1 = [' % i
, end
='')
561 tmp_file_comment
+= "]\tC{}HT1 = [".format(i
)
562 tmp_file_comment
+= bitstring_write(
563 pipelines_ht1
, self
.n_pipelines
)
565 tmp_file_comment
+= ']\t'
568 self
.stage2_file_comment
= tmp_file_comment
570 # check if file traceing is enabled
571 if(enable_stage2_fileout
!= 1):
573 # spit out the combination to file
574 self
.stage2_process_file()
576 def stage2_updateCoresInBuf(self
, nPipeline
, sCore
):
577 rePipeline
= self
._fileTrace
.arr_pipelines
[nPipeline
]
578 rePipeline
= rePipeline
.replace("[", "\[").replace("]", "\]")
579 reCore
= 'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
580 sSubs
= 'core = ' + sCore
+ '\n'
582 reg_pipeline
= re
.compile(rePipeline
)
583 search_match
= reg_pipeline
.search(self
._fileTrace
.in_buf
)
586 pos
= search_match
.start()
587 substr1
= self
._fileTrace
.in_buf
[:pos
]
588 substr2
= self
._fileTrace
.in_buf
[pos
:]
589 substr2
= re
.sub(reCore
, sSubs
, substr2
, 1)
590 self
._fileTrace
.in_buf
= substr1
+ substr2
592 def pipelines2cores(self
, n
, n_bits
, nCore
, bHT
):
599 cond
= (n
& (1 << i
))
601 # update the pipelines array to match the core
602 # only in case of cond match
603 # PIPELINE0 and core 0 are reserved
605 tmpCore
= fileTrace
.in_physical_cores
[nCore
] + 'h'
606 self
.arr2_pipelines2cores
[pipeline_idx
] = tmpCore
608 self
.arr2_pipelines2cores
[pipeline_idx
] = \
609 fileTrace
.in_physical_cores
[nCore
]
614 def stage2_process_file(self
):
615 outFileName
= os
.path
.join(self
._fileTrace
.out_path
,
616 self
._fileTrace
.prefix_outfile
)
617 outFileName
+= "_{}CoReS".format(self
.n_cores
)
619 for i
in range(0, self
.n_cores
):
620 mask
= len2mask(self
.cores
[i
].n_pipelines
)
621 pipelines_ht0
= self
.bitpos_apply((~self
.cores
[i
].counter
) & mask
,
622 self
.cores
[i
].bitpos
,
623 self
.cores
[i
].n_pipelines
)
625 pipelines_ht1
= self
.bitpos_apply(self
.cores
[i
].counter
,
626 self
.cores
[i
].bitpos
,
627 self
.cores
[i
].n_pipelines
)
629 # update pipelines to core mapping
630 self
.pipelines2cores(pipelines_ht0
, self
.n_pipelines
, i
, False)
631 self
.pipelines2cores(pipelines_ht1
, self
.n_pipelines
, i
, True)
633 # update the in_buf as per the arr_pipelines2cores
634 for pipeline_idx
in range(len(self
.arr2_pipelines2cores
)):
635 outFileName
+= "_{}".format(
636 self
.arr2_pipelines2cores
[pipeline_idx
])
637 self
.stage2_updateCoresInBuf(
638 pipeline_idx
, self
.arr2_pipelines2cores
[pipeline_idx
])
640 # by now the in_buf is all set to be written to file
641 outFileName
+= self
._fileTrace
.suffix_outfile
642 outputFile
= open(outFileName
, "w")
644 # write the file comments
645 strTruncated
= ("", "(Truncated)")[self
._fileTrace
.ncores_truncated
]
647 "; =============== Pipeline-to-Core Mapping ================\n"
648 "; Generated from file {}\n"
649 "; Input pipelines = {}\n"
650 "; Input cores = {}\n"
651 "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {} \n"
653 self
._fileTrace
.in_file_namepath
,
654 fileTrace
.arr_pipelines
,
655 fileTrace
.in_physical_cores
,
656 self
._fileTrace
.n_pipelines
,
657 self
._fileTrace
.n_cores
,
659 self
._fileTrace
.hyper_thread
))
665 "; ========================================================\n"
668 stg0cmt
=self
.stage0_file_comment
,
669 stg1cmt
=self
.stage1_file_comment
,
670 stg2cmt
=self
.stage2_file_comment
))
672 # write the buffer contents
673 outputFile
.write(self
._fileTrace
.in_buf
)
677 def stage2_process(self
):
679 while(i
< self
.n_cores
):
680 self
.cores
[i
].counter_max
= len2mask(
681 self
.cores
[i
].n_pipelines
- 1)
684 self
.pos
= self
.n_cores
- 1
686 if (self
.pos
== self
.n_cores
- 1):
689 if (self
.cores
[self
.pos
].counter
==
690 self
.cores
[self
.pos
].counter_max
):
694 self
.cores
[self
.pos
].counter
= 0
698 self
.cores
[self
.pos
].counter
+= 1
699 if(self
.pos
< self
.n_cores
- 1):
705 def __init__(self
, filenamepath
):
706 self
.in_file_namepath
= os
.path
.abspath(filenamepath
)
707 self
.in_filename
= os
.path
.basename(self
.in_file_namepath
)
708 self
.in_path
= os
.path
.dirname(self
.in_file_namepath
)
710 filenamesplit
= self
.in_filename
.split('.')
711 self
.prefix_outfile
= filenamesplit
[0]
712 self
.suffix_outfile
= ".cfg"
714 # output folder: in the same folder as input file
715 # create new folder in the name of input file
716 self
.out_path
= os
.path
.join(
717 os
.path
.abspath(os
.path
.dirname(__file__
)),
721 os
.makedirs(self
.out_path
)
722 except OSError as excep
:
723 if excep
.errno
== errno
.EEXIST
and os
.path
.isdir(self
.out_path
):
729 self
.arr_pipelines
= [] # holds the positions of search
732 self
.max_pipelines
= 15
734 self
.in_physical_cores
= None
735 self
.hyper_thread
= None
737 # save the num of pipelines determined from input file
739 # save the num of cores input (or the truncated value)
741 self
.ncores_truncated
= False
743 def print_TraceFile(self
):
744 print("self.in_file_namepath = ", self
.in_file_namepath
)
745 print("self.in_filename = ", self
.in_filename
)
746 print("self.in_path = ", self
.in_path
)
747 print("self.out_path = ", self
.out_path
)
748 print("self.prefix_outfile = ", self
.prefix_outfile
)
749 print("self.suffix_outfile = ", self
.suffix_outfile
)
750 print("self.in_buf = ", self
.in_buf
)
751 print("self.arr_pipelines =", self
.arr_pipelines
)
752 print("self.in_physical_cores", self
.in_physical_cores
)
753 print("self.hyper_thread", self
.hyper_thread
)
def process(n_cores, n_pipelines, fileTrace):
    '''process and map pipelines, cores.'''
    if n_cores == 0:
        sys.exit('N_CORES is 0, exiting')

    if n_pipelines == 0:
        sys.exit('N_PIPELINES is 0, exiting')

    if n_cores > n_pipelines:
        # More cores than pipelines is pointless: clamp, and remember the
        # truncation so output files can be labelled accordingly.
        print('\nToo many cores, truncating N_CORES to N_PIPELINES')
        n_cores = n_pipelines
        fileTrace.ncores_truncated = True

    fileTrace.n_pipelines = n_pipelines
    fileTrace.n_cores = n_cores

    strTruncated = ("", "(Truncated)")[fileTrace.ncores_truncated]
    print("N_PIPELINES = {}, N_CORES = {} {}"
          .format(n_pipelines, n_cores, strTruncated))
    print("---------------------------------------------------------------")

    # stage0 drives stage1, which drives stage2; build all three contexts.
    ctx0_inst = Context0()
    ctx1_inst = Context1()
    ctx2_inst = Context2()

    # initialize the class variables
    ctx1_inst._fileTrace = fileTrace
    ctx2_inst._fileTrace = fileTrace

    ctx0_inst.stage0_init(n_cores, n_pipelines, ctx1_inst, ctx2_inst)
    ctx0_inst.stage0_process()
def validate_core(core):
    """Return True if *core* names a physical core (e.g. "s0c1")."""
    match = reg_phycore.match(core)
    return match is not None
def validate_phycores(phy_cores):
    '''validate physical cores, check if unique.'''
    phy_cores = phy_cores.strip().split(',')

    # check if the core list is unique
    if len(phy_cores) != len(set(phy_cores)):
        print('list of physical cores has duplicates')
        return None

    # every entry must match the "s<SOCKET>c<CORE>" physical-core pattern
    for core in phy_cores:
        if not validate_core(core):
            print('invalid physical core specified.')
            return None
    return phy_cores
814 def scanconfigfile(fileTrace
):
815 '''scan input file for pipelines, validate then process.'''
817 filetoscan
= open(fileTrace
.in_file_namepath
, 'r')
818 fileTrace
.in_buf
= filetoscan
.read()
820 # reset iterator on open file
823 # scan input file for pipelines
824 # master pipelines to be ignored
825 pattern_pipeline
= r
'\[PIPELINE\d*\]'
826 pattern_mastertype
= r
'type\s*=\s*MASTER'
828 pending_pipeline
= False
829 for line
in filetoscan
:
830 match_pipeline
= re
.search(pattern_pipeline
, line
)
831 match_type
= re
.search('type\s*=', line
)
832 match_mastertype
= re
.search(pattern_mastertype
, line
)
835 sPipeline
= line
[match_pipeline
.start():match_pipeline
.end()]
836 pending_pipeline
= True
838 # found a type definition...
839 if(match_mastertype
is None):
840 # and this is not a master pipeline...
841 if(pending_pipeline
):
842 # add it to the list of pipelines to be mapped
843 fileTrace
.arr_pipelines
.append(sPipeline
)
844 pending_pipeline
= False
846 # and this is a master pipeline...
847 # ignore the current and move on to next
849 pending_pipeline
= False
852 # validate if pipelines are unique
853 if(len(fileTrace
.arr_pipelines
) != len(set(fileTrace
.arr_pipelines
))):
854 sys
.exit('Error: duplicate pipelines in input file')
856 num_pipelines
= len(fileTrace
.arr_pipelines
)
857 num_cores
= len(fileTrace
.in_physical_cores
)
859 print("-------------------Pipeline-to-core mapping--------------------")
860 print("Input pipelines = {}\nInput cores = {}"
861 .format(fileTrace
.arr_pipelines
, fileTrace
.in_physical_cores
))
863 # input configuration file validations goes here
864 if (num_cores
> fileTrace
.max_cores
):
865 sys
.exit('Error: number of cores specified > max_cores (%d)' %
868 if (num_pipelines
> fileTrace
.max_pipelines
):
869 sys
.exit('Error: number of pipelines in input \
870 cfg file > max_pipelines (%d)' % fileTrace
.max_pipelines
)
872 # call process to generate pipeline-to-core mapping, trace and log
873 process(num_cores
, num_pipelines
, fileTrace
)
876 if __name__
== "__main__":
877 parser
= argparse
.ArgumentParser(description
='mappipelines')
879 reqNamedGrp
= parser
.add_argument_group('required named args')
880 reqNamedGrp
.add_argument(
883 type=argparse
.FileType('r'),
884 help='Input config file',
887 reqNamedGrp
.add_argument(
890 type=validate_phycores
,
891 help='''Enter available CPU cores in
892 format:\"<core>,<core>,...\"
893 where each core format: \"s<SOCKETID>c<COREID>\"
894 where SOCKETID={0..9}, COREID={1-99}''',
897 # add optional arguments
901 help='enable/disable hyper threading. default is ON',
903 choices
=['ON', 'OFF'])
908 help='''disable output config file generation.
909 Output file generation is enabled by default''',
912 args
= parser
.parse_args()
914 if(args
.physical_cores
is None):
915 parser
.error("invalid physical_cores specified")
917 # create object of FileTrace and initialise
918 fileTrace
= FileTrace(args
.input_file
.name
)
919 fileTrace
.in_physical_cores
= args
.physical_cores
920 fileTrace
.hyper_thread
= args
.hyper_thread
922 if(fileTrace
.hyper_thread
== 'OFF'):
923 print("!!!!disabling stage2 HT!!!!")
924 enable_stage2_traceout
= 0
925 enable_stage2_fileout
= 0
926 elif(fileTrace
.hyper_thread
== 'ON'):
927 print("!!!!HT enabled. disabling stage1 file generation.!!!!")
928 enable_stage1_fileout
= 0
930 if(args
.no_output_file
is True):
931 print("!!!!disabling stage1 and stage2 fileout!!!!")
932 enable_stage1_fileout
= 0
933 enable_stage2_fileout
= 0
935 scanconfigfile(fileTrace
)