#!/usr/bin/env python

# BSD LICENSE
#
# Copyright(c) 2016 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in
#     the documentation and/or other materials provided with the
#     distribution.
#   * Neither the name of Intel Corporation nor the names of its
#     contributors may be used to endorse or promote products derived
#     from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#
# This script maps the set of pipelines identified (MASTER pipelines are
# ignored) in the input configuration file onto the set of cores given as
# an input argument, and creates one configuration file per mapping
# combination.
#
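#
# Example invocation (illustrative; the .cfg name and the socket/core IDs
# are placeholders for whatever your system provides):
#
#   ./pipeline-to-core-mapping.py -i my_pipeline.cfg -pc "s0c1,s0c2,s0c3"
#
# One output .cfg file is written per valid mapping combination.
#
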
from __future__ import print_function
import sys
import errno
import os
import re
import array
import itertools
import argparse
from collections import namedtuple

# default values
enable_stage0_traceout = 1
enable_stage1_traceout = 1
enable_stage2_traceout = 1

enable_stage1_fileout = 1
enable_stage2_fileout = 1

Constants = namedtuple('Constants', ['MAX_CORES', 'MAX_PIPELINES'])
constants = Constants(16, 64)

# pattern for a physical core, e.g. "s0c1"
pattern_phycore = r'^(s|S)\d(c|C)[1-9][0-9]*$'
reg_phycore = re.compile(pattern_phycore)
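# Illustrative matches for the pattern above: 's0c1' and 'S1C12' are
# accepted; 's0c0' is rejected (core IDs start at 1) and 'c1' is rejected
# (no socket ID).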


def popcount(mask):
    return bin(mask).count("1")


def len2mask(length):
    if (length == 0):
        return 0

    if (length > 64):
        sys.exit('error: len2mask - length %i > 64. exiting' % length)

    return int('1' * length, 2)


def bitstring_write(n, n_bits):
    tmpstr = ""
    if (n_bits > 64):
        return

    i = n_bits - 1
    while (i >= 0):
        cond = (n & (1 << i))
        if (cond):
            print('1', end='')
            tmpstr += '1'
        else:
            print('0', end='')
            tmpstr += '0'
        i -= 1
    return tmpstr

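# Illustrative behaviour of the helpers above (values only, not executed):
#   popcount(0b1011)       -> 3
#   len2mask(4)            -> 15 (0b1111)
#   bitstring_write(5, 4)  -> prints "0101" and returns "0101"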

class Cores0:

    def __init__(self):
        self.n_pipelines = 0


class Cores1:

    def __init__(self):
        self.pipelines = 0
        self.n_pipelines = 0


class Cores2:

    def __init__(self):
        self.pipelines = 0
        self.n_pipelines = 0
        self.counter = 0
        self.counter_max = 0
        self.bitpos = array.array(
            "L", itertools.repeat(0, constants.MAX_PIPELINES))


class Context0:

    def __init__(self):
        self.cores = [Cores0() for i in range(0, constants.MAX_CORES)]
        self.n_cores = 0
        self.n_pipelines = 0
        self.n_pipelines0 = 0
        self.pos = 0
        self.file_comment = ""
        self.ctx1 = None
        self.ctx2 = None

    def stage0_print(self):
        print('printing Context0 obj')
        print('c0.cores(n_pipelines) = [ ', end='')
        for cores_count in range(0, constants.MAX_CORES):
            print(self.cores[cores_count].n_pipelines, end=' ')
        print(']')
        print('c0.n_cores = %d' % self.n_cores)
        print('c0.n_pipelines = %d' % self.n_pipelines)
        print('c0.n_pipelines0 = %d' % self.n_pipelines0)
        print('c0.pos = %d' % self.pos)
        print('c0.file_comment = %s' % self.file_comment)
        if (self.ctx1 is not None):
            print('c0.ctx1 = ', end='')
            print(repr(self.ctx1))
        else:
            print('c0.ctx1 = None')

        if (self.ctx2 is not None):
            print('c0.ctx2 = ', end='')
            print(repr(self.ctx2))
        else:
            print('c0.ctx2 = None')

    def stage0_init(self, num_cores, num_pipelines, ctx1, ctx2):
        self.n_cores = num_cores
        self.n_pipelines = num_pipelines
        self.ctx1 = ctx1
        self.ctx2 = ctx2

    def stage0_process(self):
        # stage0 init
        self.cores[0].n_pipelines = self.n_pipelines
        self.n_pipelines0 = 0
        self.pos = 1

        while True:
            # go forward
            while True:
                if ((self.pos < self.n_cores) and (self.n_pipelines0 > 0)):
                    self.cores[self.pos].n_pipelines = min(
                        self.cores[self.pos - 1].n_pipelines,
                        self.n_pipelines0)
                    self.n_pipelines0 -= self.cores[self.pos].n_pipelines
                    self.pos += 1
                else:
                    break

            # check solution
            if (self.n_pipelines0 == 0):
                self.stage0_log()
                self.ctx1.stage1_init(self, self.ctx2)  # self is object c0
                self.ctx1.stage1_process()

            # go backward
            while True:
                if (self.pos == 0):
                    return

                self.pos -= 1
                if ((self.cores[self.pos].n_pipelines > 1) and
                        (self.pos != (self.n_cores - 1))):
                    break

                self.n_pipelines0 += self.cores[self.pos].n_pipelines
                self.cores[self.pos].n_pipelines = 0

            # rearm
            self.cores[self.pos].n_pipelines -= 1
            self.n_pipelines0 += 1
            self.pos += 1

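    # Note on the loop above (added commentary): stage0 enumerates every
    # split of n_pipelines across at most n_cores cores with per-core counts
    # in non-increasing order. For 4 pipelines on 2 cores, for example, it
    # visits C0=4 C1=0, then C0=3 C1=1, then C0=2 C1=2.
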
    def stage0_log(self):
        tmp_file_comment = ""
        if(enable_stage0_traceout != 1):
            return

        print('STAGE0: ', end='')
        tmp_file_comment += 'STAGE0: '
        for cores_count in range(0, self.n_cores):
            print('C%d = %d\t'
                  % (cores_count,
                     self.cores[cores_count].n_pipelines), end='')
            tmp_file_comment += "C{} = {}\t".format(
                cores_count, self.cores[cores_count].n_pipelines)
        # end for
        print('')
        self.ctx1.stage0_file_comment = tmp_file_comment
        self.ctx2.stage0_file_comment = tmp_file_comment


class Context1:
    _fileTrace = None

    def __init__(self):
        self.cores = [Cores1() for i in range(constants.MAX_CORES)]
        self.n_cores = 0
        self.n_pipelines = 0
        self.pos = 0
        self.stage0_file_comment = ""
        self.stage1_file_comment = ""

        self.ctx2 = None
        self.arr_pipelines2cores = []

    def stage1_reset(self):
        for i in range(constants.MAX_CORES):
            self.cores[i].pipelines = 0
            self.cores[i].n_pipelines = 0

        self.n_cores = 0
        self.n_pipelines = 0
        self.pos = 0
        self.ctx2 = None
        # clear list
        del self.arr_pipelines2cores[:]

    def stage1_print(self):
        print('printing Context1 obj')
        print('ctx1.cores(pipelines,n_pipelines) = [ ', end='')
        for cores_count in range(0, constants.MAX_CORES):
            print('(%d,%d)' % (self.cores[cores_count].pipelines,
                               self.cores[cores_count].n_pipelines), end=' ')
        print(']')
        print('ctx1.n_cores = %d' % self.n_cores)
        print('ctx1.n_pipelines = %d' % self.n_pipelines)
        print('ctx1.pos = %d' % self.pos)
        print('ctx1.stage0_file_comment = %s' % self.stage0_file_comment)
        print('ctx1.stage1_file_comment = %s' % self.stage1_file_comment)
        if (self.ctx2 is not None):
            print('ctx1.ctx2 = ', end='')
            print(self.ctx2)
        else:
            print('ctx1.ctx2 = None')

    def stage1_init(self, c0, ctx2):
        self.stage1_reset()
        self.n_cores = 0
        while (c0.cores[self.n_cores].n_pipelines > 0):
            self.n_cores += 1

        self.n_pipelines = c0.n_pipelines
        self.ctx2 = ctx2

        self.arr_pipelines2cores = [0] * self.n_pipelines

        i = 0
        while (i < self.n_cores):
            self.cores[i].n_pipelines = c0.cores[i].n_pipelines
            i += 1

    def stage1_process(self):
        pipelines_max = len2mask(self.n_pipelines)
        while True:
            pos = 0
            overlap = 0

            if (self.cores[self.pos].pipelines == pipelines_max):
                if (self.pos == 0):
                    return

                self.cores[self.pos].pipelines = 0
                self.pos -= 1
                continue

            self.cores[self.pos].pipelines += 1
            if (popcount(self.cores[self.pos].pipelines) !=
                    self.cores[self.pos].n_pipelines):
                continue

            overlap = 0
            pos = 0
            while (pos < self.pos):
                if ((self.cores[self.pos].pipelines) &
                        (self.cores[pos].pipelines)):
                    overlap = 1
                    break
                pos += 1

            if (overlap):
                continue

            if ((self.pos > 0) and
                    ((self.cores[self.pos].n_pipelines) ==
                     (self.cores[self.pos - 1].n_pipelines)) and
                    ((self.cores[self.pos].pipelines) <
                     (self.cores[self.pos - 1].pipelines))):
                continue

            if (self.pos == self.n_cores - 1):
                self.stage1_log()
                self.ctx2.stage2_init(self)
                self.ctx2.stage2_process()

                if (self.pos == 0):
                    return

                self.cores[self.pos].pipelines = 0
                self.pos -= 1
                continue

            self.pos += 1

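    # Note on the loop above (added commentary): stage1 turns each stage0
    # count into a concrete pipeline set per core. Each core receives a
    # bitmask with exactly n_pipelines bits set, masks of different cores may
    # not overlap, and cores with equal counts are kept in ascending mask
    # order so that symmetric duplicates are skipped.
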
    def stage1_log(self):
        tmp_file_comment = ""
        if(enable_stage1_traceout == 1):
            print('STAGE1: ', end='')
            tmp_file_comment += 'STAGE1: '
            i = 0
            while (i < self.n_cores):
                print('C%d = [' % i, end='')
                tmp_file_comment += "C{} = [".format(i)

                j = self.n_pipelines - 1
                while (j >= 0):
                    cond = ((self.cores[i].pipelines) & (1 << j))
                    if (cond):
                        print('1', end='')
                        tmp_file_comment += '1'
                    else:
                        print('0', end='')
                        tmp_file_comment += '0'
                    j -= 1

                print(']\t', end='')
                tmp_file_comment += ']\t'
                i += 1

            print('\n', end='')
            self.stage1_file_comment = tmp_file_comment
            self.ctx2.stage1_file_comment = tmp_file_comment

        # check if file tracing is enabled
        if(enable_stage1_fileout != 1):
            return

        # spit out the combination to file
        self.stage1_process_file()

    def stage1_updateCoresInBuf(self, nPipeline, sCore):
        rePipeline = self._fileTrace.arr_pipelines[nPipeline]
        rePipeline = rePipeline.replace("[", r"\[").replace("]", r"\]")
        reCore = r'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
        sSubs = 'core = ' + sCore + '\n'

        reg_pipeline = re.compile(rePipeline)
        search_match = reg_pipeline.search(self._fileTrace.in_buf)

        if(search_match):
            pos = search_match.start()
            substr1 = self._fileTrace.in_buf[:pos]
            substr2 = self._fileTrace.in_buf[pos:]
            substr2 = re.sub(reCore, sSubs, substr2, 1)
            self._fileTrace.in_buf = substr1 + substr2

    def stage1_process_file(self):
        outFileName = os.path.join(self._fileTrace.out_path,
                                   self._fileTrace.prefix_outfile)
        outFileName += "_{}CoReS".format(self.n_cores)

        i = 0  # represents core number
        # note: fileTrace (module-level, created under __main__) supplies the
        # physical core names here
        while (i < self.n_cores):
            j = self.n_pipelines - 1
            pipeline_idx = 0
            while(j >= 0):
                cond = ((self.cores[i].pipelines) & (1 << j))
                if (cond):
                    # update the pipelines array to match the core
                    # only in case of cond match
                    self.arr_pipelines2cores[
                        pipeline_idx] = fileTrace.in_physical_cores[i]

                j -= 1
                pipeline_idx += 1

            i += 1

        # update the in_buf as per the arr_pipelines2cores
        for pipeline_idx in range(len(self.arr_pipelines2cores)):
            outFileName += "_{}".format(self.arr_pipelines2cores[pipeline_idx])
            self.stage1_updateCoresInBuf(
                pipeline_idx, self.arr_pipelines2cores[pipeline_idx])

        # by now the in_buf is all set to be written to file
        outFileName += self._fileTrace.suffix_outfile
        outputFile = open(outFileName, "w")

        # write out the comments
        strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
        outputFile.write(
            "; =============== Pipeline-to-Core Mapping ================\n"
            "; Generated from file {}\n"
            "; Input pipelines = {}\n"
            "; Input cores = {}\n"
            "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {}\n"
            .format(
                self._fileTrace.in_file_namepath,
                fileTrace.arr_pipelines,
                fileTrace.in_physical_cores,
                self._fileTrace.n_pipelines,
                self._fileTrace.n_cores,
                strTruncated,
                self._fileTrace.hyper_thread))

        outputFile.write(
            "; {stg0cmt}\n"
            "; {stg1cmt}\n"
            "; ========================================================\n"
            "; \n"
            .format(
                stg0cmt=self.stage0_file_comment,
                stg1cmt=self.stage1_file_comment))

        # write buffer contents
        outputFile.write(self._fileTrace.in_buf)
        outputFile.flush()
        outputFile.close()


class Context2:
    _fileTrace = None

    def __init__(self):
        self.cores = [Cores2() for i in range(constants.MAX_CORES)]
        self.n_cores = 0
        self.n_pipelines = 0
        self.pos = 0
        self.stage0_file_comment = ""
        self.stage1_file_comment = ""
        self.stage2_file_comment = ""

        # each array entry is a pipeline mapped to a core, stored as a string
        # pipelines range from 1 to n, but are stored in a zero-based array
        self.arr2_pipelines2cores = []

    def stage2_print(self):
        print('printing Context2 obj')
        print('ctx2.cores(pipelines, n_pipelines, counter, counter_max) =')
        for cores_count in range(0, constants.MAX_CORES):
            print('core[%d] = (%d,%d,%d,%d)' % (
                cores_count,
                self.cores[cores_count].pipelines,
                self.cores[cores_count].n_pipelines,
                self.cores[cores_count].counter,
                self.cores[cores_count].counter_max))

        print('ctx2.n_cores = %d' % self.n_cores, end='')
        print('ctx2.n_pipelines = %d' % self.n_pipelines, end='')
        print('ctx2.pos = %d' % self.pos)
        print('ctx2.stage0_file_comment = %s' %
              self.stage0_file_comment)
        print('ctx2.stage1_file_comment = %s' %
              self.stage1_file_comment)
        print('ctx2.stage2_file_comment = %s' %
              self.stage2_file_comment)

    def stage2_reset(self):
        for i in range(0, constants.MAX_CORES):
            self.cores[i].pipelines = 0
            self.cores[i].n_pipelines = 0
            self.cores[i].counter = 0
            self.cores[i].counter_max = 0

            for idx in range(0, constants.MAX_PIPELINES):
                self.cores[i].bitpos[idx] = 0

        self.n_cores = 0
        self.n_pipelines = 0
        self.pos = 0
        # clear list
        del self.arr2_pipelines2cores[:]

    def bitpos_load(self, coreidx):
        i = j = 0
        while (i < self.n_pipelines):
            if ((self.cores[coreidx].pipelines) &
                    (1 << i)):
                self.cores[coreidx].bitpos[j] = i
                j += 1
            i += 1
        self.cores[coreidx].n_pipelines = j

    def bitpos_apply(self, in_buf, pos, n_pos):
        out = 0
        for i in range(0, n_pos):
            out |= (in_buf & (1 << i)) << (pos[i] - i)

        return out

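    # Worked example for the two helpers above (added commentary, values are
    # illustrative): if a core owns pipelines {1, 3}, i.e. mask 0b1010,
    # bitpos_load() records bitpos = [1, 3] and n_pipelines = 2;
    # bitpos_apply(0b01, [1, 3], 2) then expands the compact 2-bit value back
    # to the full pipeline mask 0b0010.
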
    def stage2_init(self, ctx1):
        self.stage2_reset()
        self.n_cores = ctx1.n_cores
        self.n_pipelines = ctx1.n_pipelines

        self.arr2_pipelines2cores = [''] * self.n_pipelines

        core_idx = 0
        while (core_idx < self.n_cores):
            self.cores[core_idx].pipelines = ctx1.cores[core_idx].pipelines

            self.bitpos_load(core_idx)
            core_idx += 1

    def stage2_log(self):
        tmp_file_comment = ""
        if(enable_stage2_traceout == 1):
            print('STAGE2: ', end='')
            tmp_file_comment += 'STAGE2: '

            for i in range(0, self.n_cores):
                mask = len2mask(self.cores[i].n_pipelines)
                pipelines_ht0 = self.bitpos_apply(
                    (~self.cores[i].counter) & mask,
                    self.cores[i].bitpos,
                    self.cores[i].n_pipelines)

                pipelines_ht1 = self.bitpos_apply(
                    self.cores[i].counter,
                    self.cores[i].bitpos,
                    self.cores[i].n_pipelines)

                print('C%dHT0 = [' % i, end='')
                tmp_file_comment += "C{}HT0 = [".format(i)
                tmp_file_comment += bitstring_write(
                    pipelines_ht0, self.n_pipelines)

                print(']\tC%dHT1 = [' % i, end='')
                tmp_file_comment += "]\tC{}HT1 = [".format(i)
                tmp_file_comment += bitstring_write(
                    pipelines_ht1, self.n_pipelines)
                print(']\t', end='')
                tmp_file_comment += ']\t'

            print('')
            self.stage2_file_comment = tmp_file_comment

        # check if file tracing is enabled
        if(enable_stage2_fileout != 1):
            return
        # spit out the combination to file
        self.stage2_process_file()

    def stage2_updateCoresInBuf(self, nPipeline, sCore):
        rePipeline = self._fileTrace.arr_pipelines[nPipeline]
        rePipeline = rePipeline.replace("[", r"\[").replace("]", r"\]")
        reCore = r'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
        sSubs = 'core = ' + sCore + '\n'

        reg_pipeline = re.compile(rePipeline)
        search_match = reg_pipeline.search(self._fileTrace.in_buf)

        if(search_match):
            pos = search_match.start()
            substr1 = self._fileTrace.in_buf[:pos]
            substr2 = self._fileTrace.in_buf[pos:]
            substr2 = re.sub(reCore, sSubs, substr2, 1)
            self._fileTrace.in_buf = substr1 + substr2

    def pipelines2cores(self, n, n_bits, nCore, bHT):
        if (n_bits > 64):
            return

        i = n_bits - 1
        pipeline_idx = 0
        while (i >= 0):
            cond = (n & (1 << i))
            if (cond):
                # update the pipelines array to match the core
                # only in case of cond match
                # PIPELINE0 and core 0 are reserved
                if(bHT):
                    tmpCore = fileTrace.in_physical_cores[nCore] + 'h'
                    self.arr2_pipelines2cores[pipeline_idx] = tmpCore
                else:
                    self.arr2_pipelines2cores[pipeline_idx] = \
                        fileTrace.in_physical_cores[nCore]

            i -= 1
            pipeline_idx += 1

    def stage2_process_file(self):
        outFileName = os.path.join(self._fileTrace.out_path,
                                   self._fileTrace.prefix_outfile)
        outFileName += "_{}CoReS".format(self.n_cores)

        for i in range(0, self.n_cores):
            mask = len2mask(self.cores[i].n_pipelines)
            pipelines_ht0 = self.bitpos_apply((~self.cores[i].counter) & mask,
                                              self.cores[i].bitpos,
                                              self.cores[i].n_pipelines)

            pipelines_ht1 = self.bitpos_apply(self.cores[i].counter,
                                              self.cores[i].bitpos,
                                              self.cores[i].n_pipelines)

            # update pipelines-to-core mapping
            self.pipelines2cores(pipelines_ht0, self.n_pipelines, i, False)
            self.pipelines2cores(pipelines_ht1, self.n_pipelines, i, True)

        # update the in_buf as per the arr2_pipelines2cores
        for pipeline_idx in range(len(self.arr2_pipelines2cores)):
            outFileName += "_{}".format(
                self.arr2_pipelines2cores[pipeline_idx])
            self.stage2_updateCoresInBuf(
                pipeline_idx, self.arr2_pipelines2cores[pipeline_idx])

        # by now the in_buf is all set to be written to file
        outFileName += self._fileTrace.suffix_outfile
        outputFile = open(outFileName, "w")

        # write the file comments
        strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
        outputFile.write(
            "; =============== Pipeline-to-Core Mapping ================\n"
            "; Generated from file {}\n"
            "; Input pipelines = {}\n"
            "; Input cores = {}\n"
            "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {} \n"
            .format(
                self._fileTrace.in_file_namepath,
                fileTrace.arr_pipelines,
                fileTrace.in_physical_cores,
                self._fileTrace.n_pipelines,
                self._fileTrace.n_cores,
                strTruncated,
                self._fileTrace.hyper_thread))

        outputFile.write(
            "; {stg0cmt}\n"
            "; {stg1cmt}\n"
            "; {stg2cmt}\n"
            "; ========================================================\n"
            "; \n"
            .format(
                stg0cmt=self.stage0_file_comment,
                stg1cmt=self.stage1_file_comment,
                stg2cmt=self.stage2_file_comment))

        # write the buffer contents
        outputFile.write(self._fileTrace.in_buf)
        outputFile.flush()
        outputFile.close()

    def stage2_process(self):
        i = 0
        while(i < self.n_cores):
            self.cores[i].counter_max = len2mask(
                self.cores[i].n_pipelines - 1)
            i += 1

        self.pos = self.n_cores - 1
        while True:
            if (self.pos == self.n_cores - 1):
                self.stage2_log()

            if (self.cores[self.pos].counter ==
                    self.cores[self.pos].counter_max):
                if (self.pos == 0):
                    return

                self.cores[self.pos].counter = 0
                self.pos -= 1
                continue

            self.cores[self.pos].counter += 1
            if(self.pos < self.n_cores - 1):
                self.pos += 1

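    # Note on the loop above (added commentary): stage2 splits each core's
    # pipelines between its two hyper-threads. counter selects the subset
    # that runs on HT1; the complement runs on HT0. counter_max spans only
    # n_pipelines - 1 bits, so a split and its mirror image are not both
    # generated.
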

class FileTrace:

    def __init__(self, filenamepath):
        self.in_file_namepath = os.path.abspath(filenamepath)
        self.in_filename = os.path.basename(self.in_file_namepath)
        self.in_path = os.path.dirname(self.in_file_namepath)

        filenamesplit = self.in_filename.split('.')
        self.prefix_outfile = filenamesplit[0]
        self.suffix_outfile = ".cfg"

        # output folder: created next to this script,
        # in a new folder named after the input file
        self.out_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            self.prefix_outfile)

        try:
            os.makedirs(self.out_path)
        except OSError as excep:
            if excep.errno == errno.EEXIST and os.path.isdir(self.out_path):
                pass
            else:
                raise

        self.in_buf = None
        self.arr_pipelines = []  # holds the positions of search

        self.max_cores = 15
        self.max_pipelines = 15

        self.in_physical_cores = None
        self.hyper_thread = None

        # save the num of pipelines determined from input file
        self.n_pipelines = 0
        # save the num of cores input (or the truncated value)
        self.n_cores = 0
        self.ncores_truncated = False

    def print_TraceFile(self):
        print("self.in_file_namepath = ", self.in_file_namepath)
        print("self.in_filename = ", self.in_filename)
        print("self.in_path = ", self.in_path)
        print("self.out_path = ", self.out_path)
        print("self.prefix_outfile = ", self.prefix_outfile)
        print("self.suffix_outfile = ", self.suffix_outfile)
        print("self.in_buf = ", self.in_buf)
        print("self.arr_pipelines =", self.arr_pipelines)
        print("self.in_physical_cores", self.in_physical_cores)
        print("self.hyper_thread", self.hyper_thread)


def process(n_cores, n_pipelines, fileTrace):
    '''process and map pipelines, cores.'''
    if (n_cores == 0):
        sys.exit('N_CORES is 0, exiting')

    if (n_pipelines == 0):
        sys.exit('N_PIPELINES is 0, exiting')

    if (n_cores > n_pipelines):
        print('\nToo many cores, truncating N_CORES to N_PIPELINES')
        n_cores = n_pipelines
        fileTrace.ncores_truncated = True

    fileTrace.n_pipelines = n_pipelines
    fileTrace.n_cores = n_cores

    strTruncated = ("", "(Truncated)")[fileTrace.ncores_truncated]
    print("N_PIPELINES = {}, N_CORES = {} {}"
          .format(n_pipelines, n_cores, strTruncated))
    print("---------------------------------------------------------------")

    ctx0_inst = Context0()
    ctx1_inst = Context1()
    ctx2_inst = Context2()

    # initialize the class variables
    ctx1_inst._fileTrace = fileTrace
    ctx2_inst._fileTrace = fileTrace

    ctx0_inst.stage0_init(n_cores, n_pipelines, ctx1_inst, ctx2_inst)
    ctx0_inst.stage0_process()


def validate_core(core):
    '''return True if core matches the s<SOCKETID>c<COREID> format.'''
    match = reg_phycore.match(core)
    if(match):
        return True
    else:
        return False


def validate_phycores(phy_cores):
    '''validate physical cores, check if unique.'''
    # strip outer whitespace and split on commas
    phy_cores = phy_cores.strip().split(',')

    # check if the core list is unique
    if(len(phy_cores) != len(set(phy_cores))):
        print('list of physical cores has duplicates')
        return None

    for core in phy_cores:
        if not validate_core(core):
            print('invalid physical core specified.')
            return None
    return phy_cores
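
# Illustrative behaviour of validate_phycores() (added commentary):
#   validate_phycores("s0c1,s0c2")  -> ['s0c1', 's0c2']
#   validate_phycores("s0c1,s0c1")  -> None (duplicate entry)
#   validate_phycores("s0c0")       -> None (core IDs start at 1)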


def scanconfigfile(fileTrace):
    '''scan input file for pipelines, validate then process.'''
    # open file
    filetoscan = open(fileTrace.in_file_namepath, 'r')
    fileTrace.in_buf = filetoscan.read()

    # reset iterator on open file
    filetoscan.seek(0)

    # scan input file for pipelines
    # MASTER pipelines are to be ignored
    pattern_pipeline = r'\[PIPELINE\d*\]'
    pattern_mastertype = r'type\s*=\s*MASTER'

    pending_pipeline = False
    for line in filetoscan:
        match_pipeline = re.search(pattern_pipeline, line)
        match_type = re.search(r'type\s*=', line)
        match_mastertype = re.search(pattern_mastertype, line)

        if(match_pipeline):
            sPipeline = line[match_pipeline.start():match_pipeline.end()]
            pending_pipeline = True
        elif(match_type):
            # found a type definition...
            if(match_mastertype is None):
                # and this is not a master pipeline...
                if(pending_pipeline):
                    # add it to the list of pipelines to be mapped
                    fileTrace.arr_pipelines.append(sPipeline)
                    pending_pipeline = False
            else:
                # and this is a master pipeline...
                # ignore the current one and move on to the next
                sPipeline = ""
                pending_pipeline = False
    filetoscan.close()

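    # Added commentary: a [PIPELINEx] section is recorded only once its first
    # 'type =' line shows it is not a MASTER pipeline; MASTER sections are
    # dropped and never mapped.
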
    # validate that pipelines are unique
    if(len(fileTrace.arr_pipelines) != len(set(fileTrace.arr_pipelines))):
        sys.exit('Error: duplicate pipelines in input file')

    num_pipelines = len(fileTrace.arr_pipelines)
    num_cores = len(fileTrace.in_physical_cores)

    print("-------------------Pipeline-to-core mapping--------------------")
    print("Input pipelines = {}\nInput cores = {}"
          .format(fileTrace.arr_pipelines, fileTrace.in_physical_cores))

    # input configuration file validation goes here
    if (num_cores > fileTrace.max_cores):
        sys.exit('Error: number of cores specified > max_cores (%d)' %
                 fileTrace.max_cores)

    if (num_pipelines > fileTrace.max_pipelines):
        sys.exit('Error: number of pipelines in input cfg file > '
                 'max_pipelines (%d)' % fileTrace.max_pipelines)

    # call process to generate pipeline-to-core mapping, trace and log
    process(num_cores, num_pipelines, fileTrace)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='mappipelines')

    reqNamedGrp = parser.add_argument_group('required named args')
    reqNamedGrp.add_argument(
        '-i',
        '--input-file',
        type=argparse.FileType('r'),
        help='Input config file',
        required=True)

    reqNamedGrp.add_argument(
        '-pc',
        '--physical-cores',
        type=validate_phycores,
        help='''Enter available CPU cores in
                format:\"<core>,<core>,...\"
                where each core format: \"s<SOCKETID>c<COREID>\"
                where SOCKETID={0..9}, COREID={1-99}''',
        required=True)

    # add optional arguments
    parser.add_argument(
        '-ht',
        '--hyper-thread',
        help='enable/disable hyper threading. default is ON',
        default='ON',
        choices=['ON', 'OFF'])

    parser.add_argument(
        '-nO',
        '--no-output-file',
        help='''disable output config file generation.
                Output file generation is enabled by default''',
        action="store_true")

    args = parser.parse_args()

    if(args.physical_cores is None):
        parser.error("invalid physical_cores specified")

    # create object of FileTrace and initialize
    fileTrace = FileTrace(args.input_file.name)
    fileTrace.in_physical_cores = args.physical_cores
    fileTrace.hyper_thread = args.hyper_thread

    if(fileTrace.hyper_thread == 'OFF'):
        print("!!!!disabling stage2 HT!!!!")
        enable_stage2_traceout = 0
        enable_stage2_fileout = 0
    elif(fileTrace.hyper_thread == 'ON'):
        print("!!!!HT enabled. disabling stage1 file generation.!!!!")
        enable_stage1_fileout = 0

    if(args.no_output_file is True):
        print("!!!!disabling stage1 and stage2 fileout!!!!")
        enable_stage1_fileout = 0
        enable_stage2_fileout = 0

    scanconfigfile(fileTrace)
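
# Example output (added commentary, names are illustrative): for an input
# file my_pipeline.cfg with three non-MASTER pipelines mapped onto cores
# s0c1 and s0c2 with hyper-threading ON, one generated file could be
#   my_pipeline/my_pipeline_2CoReS_s0c1_s0c1h_s0c2.cfg
# with the chosen 'core = ...' value rewritten in each [PIPELINEx] section.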