#!/usr/bin/env python

# BSD LICENSE
#
# Copyright(c) 2016 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in
#     the documentation and/or other materials provided with the
#     distribution.
#   * Neither the name of Intel Corporation nor the names of its
#     contributors may be used to endorse or promote products derived
#     from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#
# This script maps the set of pipelines identified in the input
# configuration file (MASTER pipelines are ignored) to the set of cores
# provided as an input argument, and creates a configuration file for
# each mapping combination.
#
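#
# Example invocation (the input file name and core list here are
# hypothetical; -i and -pc are the required arguments defined in
# __main__ below):
#
#   ./pipeline-to-core-mapping.py -i ip_pipeline.cfg -pc "s0c1,s0c2"
#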

from __future__ import print_function
from collections import namedtuple
import argparse
import array
import errno
import itertools
import os
import re
import sys

# global defaults for trace and file output; adjusted in __main__ from
# the command-line options
enable_stage0_traceout = 1
enable_stage1_traceout = 1
enable_stage2_traceout = 1

enable_stage1_fileout = 1
enable_stage2_fileout = 1

Constants = namedtuple('Constants', ['MAX_CORES', 'MAX_PIPELINES'])
constants = Constants(16, 64)

# pattern for a physical core, e.g. "s0c1"
pattern_phycore = r'^(s|S)\d(c|C)[1-9][0-9]*$'
reg_phycore = re.compile(pattern_phycore)

def popcount(mask):
    return bin(mask).count("1")


def len2mask(length):
    if (length == 0):
        return 0

    if (length > 64):
        sys.exit('error: len2mask - length %i > 64. exiting' % length)

    return int('1' * length, 2)


def bitstring_write(n, n_bits):
    tmpstr = ""
    if (n_bits > 64):
        return

    i = n_bits - 1
    while (i >= 0):
        cond = (n & (1 << i))
        if (cond):
            print('1', end='')
            tmpstr += '1'
        else:
            print('0', end='')
            tmpstr += '0'
        i -= 1
    return tmpstr
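
# e.g. popcount(0b1011) == 3, len2mask(3) == 0b111 == 7, and
# bitstring_write(0b101, 4) prints and returns '0101'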


class Cores0:

    def __init__(self):
        self.n_pipelines = 0


class Cores1:

    def __init__(self):
        self.pipelines = 0
        self.n_pipelines = 0


class Cores2:

    def __init__(self):
        self.pipelines = 0
        self.n_pipelines = 0
        self.counter = 0
        self.counter_max = 0
        self.bitpos = array.array(
            "L", itertools.repeat(0, constants.MAX_PIPELINES))


class Context0:

    def __init__(self):
        self.cores = [Cores0() for i in range(0, constants.MAX_CORES)]
        self.n_cores = 0
        self.n_pipelines = 0
        self.n_pipelines0 = 0
        self.pos = 0
        self.file_comment = ""
        self.ctx1 = None
        self.ctx2 = None

    def stage0_print(self):
        print('printing Context0 obj')
        print('c0.cores(n_pipelines) = [ ', end='')
        for cores_count in range(0, constants.MAX_CORES):
            print(self.cores[cores_count].n_pipelines, end=' ')
        print(']')
        print('c0.n_cores = %d' % self.n_cores)
        print('c0.n_pipelines = %d' % self.n_pipelines)
        print('c0.n_pipelines0 = %d' % self.n_pipelines0)
        print('c0.pos = %d' % self.pos)
        print('c0.file_comment = %s' % self.file_comment)
        if (self.ctx1 is not None):
            print('c0.ctx1 = ', end='')
            print(repr(self.ctx1))
        else:
            print('c0.ctx1 = None')

        if (self.ctx2 is not None):
            print('c0.ctx2 = ', end='')
            print(repr(self.ctx2))
        else:
            print('c0.ctx2 = None')

    def stage0_init(self, num_cores, num_pipelines, ctx1, ctx2):
        self.n_cores = num_cores
        self.n_pipelines = num_pipelines
        self.ctx1 = ctx1
        self.ctx2 = ctx2

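    # a sketch of the stage0 idea: enumerate, by backtracking, every
    # split of n_pipelines across the cores with per-core counts in
    # non-increasing order; e.g. 4 pipelines on 2 cores yields the
    # distributions (4, 0), (3, 1) and (2, 2), and each solution is
    # handed to stage1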
    def stage0_process(self):
        # stage0 init
        self.cores[0].n_pipelines = self.n_pipelines
        self.n_pipelines0 = 0
        self.pos = 1

        while True:
            # go forward
            while True:
                if ((self.pos < self.n_cores) and (self.n_pipelines0 > 0)):
                    self.cores[self.pos].n_pipelines = min(
                        self.cores[self.pos - 1].n_pipelines,
                        self.n_pipelines0)
                    self.n_pipelines0 -= self.cores[self.pos].n_pipelines
                    self.pos += 1
                else:
                    break

            # check solution
            if (self.n_pipelines0 == 0):
                self.stage0_log()
                self.ctx1.stage1_init(self, self.ctx2)  # self is object c0
                self.ctx1.stage1_process()

            # go backward
            while True:
                if (self.pos == 0):
                    return

                self.pos -= 1
                if ((self.cores[self.pos].n_pipelines > 1) and
                        (self.pos != (self.n_cores - 1))):
                    break

                self.n_pipelines0 += self.cores[self.pos].n_pipelines
                self.cores[self.pos].n_pipelines = 0

            # rearm
            self.cores[self.pos].n_pipelines -= 1
            self.n_pipelines0 += 1
            self.pos += 1

    def stage0_log(self):
        tmp_file_comment = ""
        if (enable_stage0_traceout != 1):
            return

        print('STAGE0: ', end='')
        tmp_file_comment += 'STAGE0: '
        for cores_count in range(0, self.n_cores):
            print('C%d = %d\t'
                  % (cores_count,
                     self.cores[cores_count].n_pipelines), end='')
            tmp_file_comment += "C{} = {}\t".format(
                cores_count, self.cores[cores_count].n_pipelines)
        # end for
        print('')
        self.ctx1.stage0_file_comment = tmp_file_comment
        self.ctx2.stage0_file_comment = tmp_file_comment


class Context1:
    _fileTrace = None

    def __init__(self):
        self.cores = [Cores1() for i in range(constants.MAX_CORES)]
        self.n_cores = 0
        self.n_pipelines = 0
        self.pos = 0
        self.stage0_file_comment = ""
        self.stage1_file_comment = ""

        self.ctx2 = None
        self.arr_pipelines2cores = []

    def stage1_reset(self):
        for i in range(constants.MAX_CORES):
            self.cores[i].pipelines = 0
            self.cores[i].n_pipelines = 0

        self.n_cores = 0
        self.n_pipelines = 0
        self.pos = 0
        self.ctx2 = None
        # clear list
        del self.arr_pipelines2cores[:]

    def stage1_print(self):
        print('printing Context1 obj')
        print('ctx1.cores(pipelines,n_pipelines) = [ ', end='')
        for cores_count in range(0, constants.MAX_CORES):
            print('(%d,%d)' % (self.cores[cores_count].pipelines,
                               self.cores[cores_count].n_pipelines),
                  end=' ')
        print(']')
        print('ctx1.n_cores = %d' % self.n_cores)
        print('ctx1.n_pipelines = %d' % self.n_pipelines)
        print('ctx1.pos = %d' % self.pos)
        print('ctx1.stage0_file_comment = %s' % self.stage0_file_comment)
        print('ctx1.stage1_file_comment = %s' % self.stage1_file_comment)
        if (self.ctx2 is not None):
            print('ctx1.ctx2 = ', end='')
            print(self.ctx2)
        else:
            print('ctx1.ctx2 = None')

    def stage1_init(self, c0, ctx2):
        self.stage1_reset()
        self.n_cores = 0
        while (c0.cores[self.n_cores].n_pipelines > 0):
            self.n_cores += 1

        self.n_pipelines = c0.n_pipelines
        self.ctx2 = ctx2

        self.arr_pipelines2cores = [0] * self.n_pipelines

        i = 0
        while (i < self.n_cores):
            self.cores[i].n_pipelines = c0.cores[i].n_pipelines
            i += 1

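    # a sketch of the stage1 idea: for the per-core pipeline counts
    # fixed by stage0, assign each core a concrete subset of pipelines
    # as a bitmask, skipping assignments whose subsets overlap and, for
    # equal-sized cores, duplicate orderings; every complete assignment
    # is handed to stage2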
    def stage1_process(self):
        pipelines_max = len2mask(self.n_pipelines)
        while True:
            pos = 0
            overlap = 0

            if (self.cores[self.pos].pipelines == pipelines_max):
                if (self.pos == 0):
                    return

                self.cores[self.pos].pipelines = 0
                self.pos -= 1
                continue

            self.cores[self.pos].pipelines += 1
            if (popcount(self.cores[self.pos].pipelines) !=
                    self.cores[self.pos].n_pipelines):
                continue

            overlap = 0
            pos = 0
            while (pos < self.pos):
                if ((self.cores[self.pos].pipelines) &
                        (self.cores[pos].pipelines)):
                    overlap = 1
                    break
                pos += 1

            if (overlap):
                continue

            if ((self.pos > 0) and
                    ((self.cores[self.pos].n_pipelines) ==
                     (self.cores[self.pos - 1].n_pipelines)) and
                    ((self.cores[self.pos].pipelines) <
                     (self.cores[self.pos - 1].pipelines))):
                continue

            if (self.pos == self.n_cores - 1):
                self.stage1_log()
                self.ctx2.stage2_init(self)
                self.ctx2.stage2_process()

                if (self.pos == 0):
                    return

                self.cores[self.pos].pipelines = 0
                self.pos -= 1
                continue

            self.pos += 1

    def stage1_log(self):
        tmp_file_comment = ""
        if (enable_stage1_traceout == 1):
            print('STAGE1: ', end='')
            tmp_file_comment += 'STAGE1: '
            i = 0
            while (i < self.n_cores):
                print('C%d = [' % i, end='')
                tmp_file_comment += "C{} = [".format(i)

                j = self.n_pipelines - 1
                while (j >= 0):
                    cond = ((self.cores[i].pipelines) & (1 << j))
                    if (cond):
                        print('1', end='')
                        tmp_file_comment += '1'
                    else:
                        print('0', end='')
                        tmp_file_comment += '0'
                    j -= 1

                print(']\t', end='')
                tmp_file_comment += ']\t'
                i += 1

            print('\n', end='')
            self.stage1_file_comment = tmp_file_comment
            self.ctx2.stage1_file_comment = tmp_file_comment

        # check if file tracing is enabled
        if (enable_stage1_fileout != 1):
            return

        # write the combination out to file
        self.stage1_process_file()

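    # rewrite, in the in-memory copy of the config file, the
    # 'core = ...' entry of the given pipeline section, so that e.g.
    # the section '[PIPELINE1]' gets 'core = s0c1'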
    def stage1_updateCoresInBuf(self, nPipeline, sCore):
        rePipeline = self._fileTrace.arr_pipelines[nPipeline]
        rePipeline = rePipeline.replace("[", r"\[").replace("]", r"\]")
        reCore = r'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
        sSubs = 'core = ' + sCore + '\n'

        reg_pipeline = re.compile(rePipeline)
        search_match = reg_pipeline.search(self._fileTrace.in_buf)

        if (search_match):
            pos = search_match.start()
            substr1 = self._fileTrace.in_buf[:pos]
            substr2 = self._fileTrace.in_buf[pos:]
            substr2 = re.sub(reCore, sSubs, substr2, count=1)
            self._fileTrace.in_buf = substr1 + substr2

    def stage1_process_file(self):
        outFileName = os.path.join(self._fileTrace.out_path,
                                   self._fileTrace.prefix_outfile)
        outFileName += "_{}CoReS".format(self.n_cores)
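        # the generated name encodes the mapping, e.g. (hypothetically)
        # <prefix>_2CoReS_s0c1_s0c2.cfg once the per-pipeline cores and
        # the suffix are appended below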

        i = 0  # represents core number
        while (i < self.n_cores):
            j = self.n_pipelines - 1
            pipeline_idx = 0
            while (j >= 0):
                cond = ((self.cores[i].pipelines) & (1 << j))
                if (cond):
                    # update the pipelines array to match the core
                    # only in case of cond match
                    self.arr_pipelines2cores[
                        pipeline_idx] = self._fileTrace.in_physical_cores[i]

                j -= 1
                pipeline_idx += 1

            i += 1

        # update the in_buf as per the arr_pipelines2cores
        for pipeline_idx in range(len(self.arr_pipelines2cores)):
            outFileName += "_{}".format(self.arr_pipelines2cores[pipeline_idx])
            self.stage1_updateCoresInBuf(
                pipeline_idx, self.arr_pipelines2cores[pipeline_idx])

        # by now the in_buf is all set to be written to file
        outFileName += self._fileTrace.suffix_outfile
        outputFile = open(outFileName, "w")

        # write out the comments
        strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
        outputFile.write(
            "; =============== Pipeline-to-Core Mapping ================\n"
            "; Generated from file {}\n"
            "; Input pipelines = {}\n"
            "; Input cores = {}\n"
            "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {}\n"
            .format(
                self._fileTrace.in_file_namepath,
                self._fileTrace.arr_pipelines,
                self._fileTrace.in_physical_cores,
                self._fileTrace.n_pipelines,
                self._fileTrace.n_cores,
                strTruncated,
                self._fileTrace.hyper_thread))

        outputFile.write(
            "; {stg0cmt}\n"
            "; {stg1cmt}\n"
            "; ========================================================\n"
            "; \n"
            .format(
                stg0cmt=self.stage0_file_comment,
                stg1cmt=self.stage1_file_comment))

        # write buffer contents
        outputFile.write(self._fileTrace.in_buf)
        outputFile.flush()
        outputFile.close()


class Context2:
    _fileTrace = None

    def __init__(self):
        self.cores = [Cores2() for i in range(constants.MAX_CORES)]
        self.n_cores = 0
        self.n_pipelines = 0
        self.pos = 0
        self.stage0_file_comment = ""
        self.stage1_file_comment = ""
        self.stage2_file_comment = ""

        # each array entry is a pipeline-to-core mapping stored as a
        # string; pipelines range from 1 to n, but are stored in a
        # zero-based array
        self.arr2_pipelines2cores = []

    def stage2_print(self):
        print('printing Context2 obj')
        print('ctx2.cores(pipelines, n_pipelines, counter, counter_max) =')
        for cores_count in range(0, constants.MAX_CORES):
            print('core[%d] = (%d,%d,%d,%d)' % (
                cores_count,
                self.cores[cores_count].pipelines,
                self.cores[cores_count].n_pipelines,
                self.cores[cores_count].counter,
                self.cores[cores_count].counter_max))

        print('ctx2.n_cores = %d' % self.n_cores)
        print('ctx2.n_pipelines = %d' % self.n_pipelines)
        print('ctx2.pos = %d' % self.pos)
        print('ctx2.stage0_file_comment = %s' % self.stage0_file_comment)
        print('ctx2.stage1_file_comment = %s' % self.stage1_file_comment)
        print('ctx2.stage2_file_comment = %s' % self.stage2_file_comment)

    def stage2_reset(self):
        for i in range(0, constants.MAX_CORES):
            self.cores[i].pipelines = 0
            self.cores[i].n_pipelines = 0
            self.cores[i].counter = 0
            self.cores[i].counter_max = 0

            for idx in range(0, constants.MAX_PIPELINES):
                self.cores[i].bitpos[idx] = 0

        self.n_cores = 0
        self.n_pipelines = 0
        self.pos = 0
        # clear list
        del self.arr2_pipelines2cores[:]

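    # record in bitpos[] the positions of the set bits of the core's
    # pipeline mask, e.g. pipelines == 0b1010 yields bitpos[0] == 1,
    # bitpos[1] == 3 and n_pipelines == 2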
    def bitpos_load(self, coreidx):
        i = j = 0
        while (i < self.n_pipelines):
            if ((self.cores[coreidx].pipelines) &
                    (1 << i)):
                self.cores[coreidx].bitpos[j] = i
                j += 1
            i += 1
        self.cores[coreidx].n_pipelines = j

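    # scatter the low n_pos bits of in_buf to the bit positions listed
    # in pos[], e.g. bitpos_apply(0b11, [1, 4], 2) == 0b10010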
    def bitpos_apply(self, in_buf, pos, n_pos):
        out = 0
        for i in range(0, n_pos):
            out |= (in_buf & (1 << i)) << (pos[i] - i)

        return out

    def stage2_init(self, ctx1):
        self.stage2_reset()
        self.n_cores = ctx1.n_cores
        self.n_pipelines = ctx1.n_pipelines

        self.arr2_pipelines2cores = [''] * self.n_pipelines

        core_idx = 0
        while (core_idx < self.n_cores):
            self.cores[core_idx].pipelines = ctx1.cores[core_idx].pipelines

            self.bitpos_load(core_idx)
            core_idx += 1

    def stage2_log(self):
        tmp_file_comment = ""
        if (enable_stage2_traceout == 1):
            print('STAGE2: ', end='')
            tmp_file_comment += 'STAGE2: '

            for i in range(0, self.n_cores):
                mask = len2mask(self.cores[i].n_pipelines)
                pipelines_ht0 = self.bitpos_apply(
                    (~self.cores[i].counter) & mask,
                    self.cores[i].bitpos,
                    self.cores[i].n_pipelines)

                pipelines_ht1 = self.bitpos_apply(
                    self.cores[i].counter,
                    self.cores[i].bitpos,
                    self.cores[i].n_pipelines)

                print('C%dHT0 = [' % i, end='')
                tmp_file_comment += "C{}HT0 = [".format(i)
                tmp_file_comment += bitstring_write(
                    pipelines_ht0, self.n_pipelines)

                print(']\tC%dHT1 = [' % i, end='')
                tmp_file_comment += "]\tC{}HT1 = [".format(i)
                tmp_file_comment += bitstring_write(
                    pipelines_ht1, self.n_pipelines)
                print(']\t', end='')
                tmp_file_comment += ']\t'

            print('')
            self.stage2_file_comment = tmp_file_comment

        # check if file tracing is enabled
        if (enable_stage2_fileout != 1):
            return
        # write the combination out to file
        self.stage2_process_file()

    def stage2_updateCoresInBuf(self, nPipeline, sCore):
        rePipeline = self._fileTrace.arr_pipelines[nPipeline]
        rePipeline = rePipeline.replace("[", r"\[").replace("]", r"\]")
        reCore = r'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
        sSubs = 'core = ' + sCore + '\n'

        reg_pipeline = re.compile(rePipeline)
        search_match = reg_pipeline.search(self._fileTrace.in_buf)

        if (search_match):
            pos = search_match.start()
            substr1 = self._fileTrace.in_buf[:pos]
            substr2 = self._fileTrace.in_buf[pos:]
            substr2 = re.sub(reCore, sSubs, substr2, count=1)
            self._fileTrace.in_buf = substr1 + substr2

    def pipelines2cores(self, n, n_bits, nCore, bHT):
        if (n_bits > 64):
            return

        i = n_bits - 1
        pipeline_idx = 0
        while (i >= 0):
            cond = (n & (1 << i))
            if (cond):
                # update the pipelines array to match the core
                # only in case of cond match
                # PIPELINE0 and core 0 are reserved
                if (bHT):
                    # hyper-thread siblings are marked with an 'h' suffix
                    tmpCore = self._fileTrace.in_physical_cores[nCore] + 'h'
                    self.arr2_pipelines2cores[pipeline_idx] = tmpCore
                else:
                    self.arr2_pipelines2cores[pipeline_idx] = \
                        self._fileTrace.in_physical_cores[nCore]

            i -= 1
            pipeline_idx += 1

    def stage2_process_file(self):
        outFileName = os.path.join(self._fileTrace.out_path,
                                   self._fileTrace.prefix_outfile)
        outFileName += "_{}CoReS".format(self.n_cores)

        for i in range(0, self.n_cores):
            mask = len2mask(self.cores[i].n_pipelines)
            pipelines_ht0 = self.bitpos_apply((~self.cores[i].counter) & mask,
                                              self.cores[i].bitpos,
                                              self.cores[i].n_pipelines)

            pipelines_ht1 = self.bitpos_apply(self.cores[i].counter,
                                              self.cores[i].bitpos,
                                              self.cores[i].n_pipelines)

            # update pipelines-to-core mapping
            self.pipelines2cores(pipelines_ht0, self.n_pipelines, i, False)
            self.pipelines2cores(pipelines_ht1, self.n_pipelines, i, True)

        # update the in_buf as per the arr2_pipelines2cores
        for pipeline_idx in range(len(self.arr2_pipelines2cores)):
            outFileName += "_{}".format(
                self.arr2_pipelines2cores[pipeline_idx])
            self.stage2_updateCoresInBuf(
                pipeline_idx, self.arr2_pipelines2cores[pipeline_idx])

        # by now the in_buf is all set to be written to file
        outFileName += self._fileTrace.suffix_outfile
        outputFile = open(outFileName, "w")

        # write the file comments
        strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
        outputFile.write(
            "; =============== Pipeline-to-Core Mapping ================\n"
            "; Generated from file {}\n"
            "; Input pipelines = {}\n"
            "; Input cores = {}\n"
            "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {}\n"
            .format(
                self._fileTrace.in_file_namepath,
                self._fileTrace.arr_pipelines,
                self._fileTrace.in_physical_cores,
                self._fileTrace.n_pipelines,
                self._fileTrace.n_cores,
                strTruncated,
                self._fileTrace.hyper_thread))

        outputFile.write(
            "; {stg0cmt}\n"
            "; {stg1cmt}\n"
            "; {stg2cmt}\n"
            "; ========================================================\n"
            "; \n"
            .format(
                stg0cmt=self.stage0_file_comment,
                stg1cmt=self.stage1_file_comment,
                stg2cmt=self.stage2_file_comment))

        # write the buffer contents
        outputFile.write(self._fileTrace.in_buf)
        outputFile.flush()
        outputFile.close()

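    # a sketch of the stage2 idea: for every core, the counter bitmask
    # selects which of its pipelines run on the second hyper-thread
    # (HT1) and which stay on the first (HT0); iterating each counter
    # from 0 to counter_max enumerates all HT splits, with the
    # highest-indexed pipeline pinned to HT0 to avoid mirrored duplicates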
    def stage2_process(self):
        i = 0
        while (i < self.n_cores):
            self.cores[i].counter_max = len2mask(
                self.cores[i].n_pipelines - 1)
            i += 1

        self.pos = self.n_cores - 1
        while True:
            if (self.pos == self.n_cores - 1):
                self.stage2_log()

            if (self.cores[self.pos].counter ==
                    self.cores[self.pos].counter_max):
                if (self.pos == 0):
                    return

                self.cores[self.pos].counter = 0
                self.pos -= 1
                continue

            self.cores[self.pos].counter += 1
            if (self.pos < self.n_cores - 1):
                self.pos += 1


class FileTrace:

    def __init__(self, filenamepath):
        self.in_file_namepath = os.path.abspath(filenamepath)
        self.in_filename = os.path.basename(self.in_file_namepath)
        self.in_path = os.path.dirname(self.in_file_namepath)

        filenamesplit = self.in_filename.split('.')
        self.prefix_outfile = filenamesplit[0]
        self.suffix_outfile = ".cfg"

        # output folder: a new folder named after the input file,
        # created next to this script
        self.out_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            self.prefix_outfile)

        try:
            os.makedirs(self.out_path)
        except OSError as excep:
            if excep.errno == errno.EEXIST and os.path.isdir(self.out_path):
                pass
            else:
                raise

        self.in_buf = None
        self.arr_pipelines = []  # pipeline section names found in input

        self.max_cores = 15
        self.max_pipelines = 15

        self.in_physical_cores = None
        self.hyper_thread = None

        # number of pipelines determined from the input file
        self.n_pipelines = 0
        # number of cores given as input (possibly truncated)
        self.n_cores = 0
        self.ncores_truncated = False

    def print_TraceFile(self):
        print("self.in_file_namepath = ", self.in_file_namepath)
        print("self.in_filename = ", self.in_filename)
        print("self.in_path = ", self.in_path)
        print("self.out_path = ", self.out_path)
        print("self.prefix_outfile = ", self.prefix_outfile)
        print("self.suffix_outfile = ", self.suffix_outfile)
        print("self.in_buf = ", self.in_buf)
        print("self.arr_pipelines =", self.arr_pipelines)
        print("self.in_physical_cores", self.in_physical_cores)
        print("self.hyper_thread", self.hyper_thread)

def process(n_cores, n_pipelines, fileTrace):
    '''Process and map pipelines onto cores.'''
    if (n_cores == 0):
        sys.exit('N_CORES is 0, exiting')

    if (n_pipelines == 0):
        sys.exit('N_PIPELINES is 0, exiting')

    if (n_cores > n_pipelines):
        print('\nToo many cores, truncating N_CORES to N_PIPELINES')
        n_cores = n_pipelines
        fileTrace.ncores_truncated = True

    fileTrace.n_pipelines = n_pipelines
    fileTrace.n_cores = n_cores

    strTruncated = ("", "(Truncated)")[fileTrace.ncores_truncated]
    print("N_PIPELINES = {}, N_CORES = {} {}"
          .format(n_pipelines, n_cores, strTruncated))
    print("---------------------------------------------------------------")

    ctx0_inst = Context0()
    ctx1_inst = Context1()
    ctx2_inst = Context2()

    # initialize the class variables
    ctx1_inst._fileTrace = fileTrace
    ctx2_inst._fileTrace = fileTrace

    ctx0_inst.stage0_init(n_cores, n_pipelines, ctx1_inst, ctx2_inst)
    ctx0_inst.stage0_process()

def validate_core(core):
    return reg_phycore.match(core) is not None


def validate_phycores(phy_cores):
    '''Validate physical cores and check that they are unique.'''
    # eat up whitespace
    phy_cores = phy_cores.strip().split(',')

    # check that the core list is unique
    if (len(phy_cores) != len(set(phy_cores))):
        print('list of physical cores has duplicates')
        return None

    for core in phy_cores:
        if not validate_core(core):
            print('invalid physical core specified.')
            return None
    return phy_cores

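# an input config is expected to contain sections like the following
# (illustrative snippet; the MASTER pipeline is skipped, and each
# matched pipeline's 'core = ...' line is rewritten by the update
# methods above):
#
#   [PIPELINE0]
#   type = MASTER
#   core = 0
#
#   [PIPELINE1]
#   type = PASS-THROUGH
#   core = 1
#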
def scanconfigfile(fileTrace):
    '''Scan the input file for pipelines, validate, then process.'''
    # open file
    filetoscan = open(fileTrace.in_file_namepath, 'r')
    fileTrace.in_buf = filetoscan.read()

    # reset iterator on open file
    filetoscan.seek(0)

    # scan input file for pipelines
    # MASTER pipelines are to be ignored
    pattern_pipeline = r'\[PIPELINE\d*\]'
    pattern_mastertype = r'type\s*=\s*MASTER'

    pending_pipeline = False
    for line in filetoscan:
        match_pipeline = re.search(pattern_pipeline, line)
        match_type = re.search(r'type\s*=', line)
        match_mastertype = re.search(pattern_mastertype, line)

        if (match_pipeline):
            sPipeline = line[match_pipeline.start():match_pipeline.end()]
            pending_pipeline = True
        elif (match_type):
            # found a type definition...
            if (match_mastertype is None):
                # and this is not a master pipeline...
                if (pending_pipeline):
                    # add it to the list of pipelines to be mapped
                    fileTrace.arr_pipelines.append(sPipeline)
                    pending_pipeline = False
            else:
                # and this is a master pipeline...
                # ignore it and move on to the next one
                sPipeline = ""
                pending_pipeline = False
    filetoscan.close()

    # validate that pipelines are unique
    if (len(fileTrace.arr_pipelines) != len(set(fileTrace.arr_pipelines))):
        sys.exit('Error: duplicate pipelines in input file')

    num_pipelines = len(fileTrace.arr_pipelines)
    num_cores = len(fileTrace.in_physical_cores)

    print("-------------------Pipeline-to-core mapping--------------------")
    print("Input pipelines = {}\nInput cores = {}"
          .format(fileTrace.arr_pipelines, fileTrace.in_physical_cores))

    # input configuration file validations go here
    if (num_cores > fileTrace.max_cores):
        sys.exit('Error: number of cores specified > max_cores (%d)' %
                 fileTrace.max_cores)

    if (num_pipelines > fileTrace.max_pipelines):
        sys.exit('Error: number of pipelines in input cfg file > '
                 'max_pipelines (%d)' % fileTrace.max_pipelines)

    # call process to generate pipeline-to-core mapping, trace and log
    process(num_cores, num_pipelines, fileTrace)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='mappipelines')

    reqNamedGrp = parser.add_argument_group('required named args')
    reqNamedGrp.add_argument(
        '-i',
        '--input-file',
        type=argparse.FileType('r'),
        help='Input config file',
        required=True)

    reqNamedGrp.add_argument(
        '-pc',
        '--physical-cores',
        type=validate_phycores,
        help='''Enter available CPU cores in
        the format: "<core>,<core>,..."
        where each core has the format: "s<SOCKETID>c<COREID>"
        with SOCKETID={0..9} and COREID={1..99}''',
        required=True)

    # add optional arguments
    parser.add_argument(
        '-ht',
        '--hyper-thread',
        help='enable/disable hyper-threading. default is ON',
        default='ON',
        choices=['ON', 'OFF'])

    parser.add_argument(
        '-nO',
        '--no-output-file',
        help='''disable output config file generation.
        Output file generation is enabled by default''',
        action="store_true")

    args = parser.parse_args()

    if (args.physical_cores is None):
        parser.error("invalid physical_cores specified")

    # create a FileTrace object and initialise it
    fileTrace = FileTrace(args.input_file.name)
    fileTrace.in_physical_cores = args.physical_cores
    fileTrace.hyper_thread = args.hyper_thread

    if (fileTrace.hyper_thread == 'OFF'):
        print("!!!!disabling stage2 HT!!!!")
        enable_stage2_traceout = 0
        enable_stage2_fileout = 0
    elif (fileTrace.hyper_thread == 'ON'):
        print("!!!!HT enabled. disabling stage1 file generation.!!!!")
        enable_stage1_fileout = 0

    if (args.no_output_file is True):
        print("!!!!disabling stage1 and stage2 fileout!!!!")
        enable_stage1_fileout = 0
        enable_stage2_fileout = 0

    scanconfigfile(fileTrace)