|
| 1 | +#!/usr/bin/env python3 |
| 2 | + |
def cli():
    """Parse command-line options for the CLARA scaling test.

    Returns the parsed argparse namespace.  Exits via sys.exit() if no
    CLARA_HOME is available from -c or the environment.
    """
    import os
    import sys
    import argparse
    parser = argparse.ArgumentParser(description='CLARA scaling test')
    parser.add_argument('-y', help='YAML file', required=True)
    parser.add_argument('-c', help='CLARA_HOME path',
                        default=os.getenv('CLARA_HOME', None))
    # NOTE: with action='append', a list default is NOT replaced by
    # user-supplied values -- argparse appends to it (so "-t 8" used to
    # produce [2, 4, 8]).  Default to None and fill in after parsing.
    parser.add_argument('-t', help='threads', type=int, action='append')
    parser.add_argument('-e', help='events per thread', default=100, type=int)
    parser.add_argument('input', help='input data file')
    cfg = parser.parse_args()
    if cfg.t is None:
        cfg.t = [2, 4]
    if cfg.c is None:
        sys.exit('-c or $CLARA_HOME is required')
    return cfg
| 15 | + |
def run(cmd):
    """Run *cmd*, yielding each non-empty, stripped line of its output.

    stdout and stderr are merged and streamed line-by-line as the child
    produces them.  A non-zero exit status is reported on stderr instead
    of being silently ignored (the generator still finishes normally so
    callers keep whatever output was already parsed).
    """
    import sys
    import subprocess
    print('scaling >>> ' + ' '.join(cmd))
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         universal_newlines=True, encoding='latin-1')
    for line in iter(p.stdout.readline, ''):
        line = line.strip()
        if line:
            yield line
    p.stdout.close()
    p.wait()
    if p.returncode != 0:
        # was a silent "pass" -- at least tell the user the run failed
        print('scaling >>> WARNING: command exited with status %d'
              % p.returncode, file=sys.stderr)
| 27 | + |
def benchmark(cfg, threads):
    """Run one scaling measurement at the given thread count.

    Launches run-clara and scrapes its console output, returning an
    ordered dict of timing metrics: events/s ('event'), per-service
    times ('services'), and the avg/total/orchestrator figures.
    """
    import collections
    results = collections.OrderedDict()
    in_summary = False  # True while inside the "Benchmark results:" section
    command = ['run-clara',
               '-c', cfg.c,
               '-n', str(cfg.e * threads),
               '-t', str(threads),
               '-y', cfg.y,
               '-o', f'tmp-scaling-{threads}',
               cfg.input]
    for line in run(command):
        print(line)
        fields = line.split()
        try:
            if 'Benchmark results:' in line:
                in_summary = True
            elif 'Processing is complete' in line:
                in_summary = False
            elif len(fields) > 20:
                if 'Processed' in line:
                    results['event'] = float(fields[12])
                elif in_summary and len(fields) > 14:
                    # per-service timing rows in the summary section
                    results.setdefault('services', collections.OrderedDict())
                    results['services'][fields[2]] = float(fields[14])
            # FIXME: what are these, why don't they add up?
            elif 'Average processing time' in line:
                results['avg'] = float(fields[6])
            elif 'Total processing time' in line:
                results['total'] = float(fields[6])
            elif 'Total orchestrator time' in line:
                results['orch'] = float(fields[6])
        except ValueError:
            # skip rows whose expected column isn't numeric
            pass
    return results
| 65 | + |
def table(benchmarks):
    """Build a rows-of-columns table from [[threads, metrics], ...].

    The first row is a header.  Metric columns are emitted in one fixed
    order for BOTH header and data rows; the original derived the
    header from the first dict's insertion order while rows used a
    hard-coded order, so the header could misalign with the values.
    Per-service columns always come last.
    """
    metric_order = ['event', 'avg', 'total', 'orch']
    first = benchmarks[0][1]
    header = ['threads']
    header.extend([k for k in metric_order if k in first])
    if 'services' in first:
        header.extend(first['services'].keys())
    rows = [header]
    for threads, metrics in benchmarks:
        row = [threads]
        for k in metric_order:
            if k in metrics:
                row.append(metrics[k])
        if 'services' in metrics:
            row.extend(metrics['services'].values())
        rows.append(row)
    return rows
| 85 | + |
def show(benchmarks):
    """Print the benchmark table to stdout, one space-separated row per line."""
    for row in table(benchmarks):
        print(*row)
| 89 | + |
def save(benchmarks):
    """Write the benchmark table to ./scaling.txt, one row per line.

    Fixes the missing newline separator: the original f.write() call
    concatenated every row onto a single line of the output file.
    """
    with open('scaling.txt', 'w') as f:
        for row in table(benchmarks):
            f.write(' '.join(str(x) for x in row) + '\n')
| 94 | + |
if __name__ == '__main__':
    cfg = cli()
    # run one measurement per requested thread count, then report
    benchmarks = [[threads, benchmark(cfg, threads)] for threads in cfg.t]
    show(benchmarks)
    save(benchmarks)
| 102 | + |
0 commit comments