# microbenchmark_runner.py
import os
import subprocess
import sys
import time
from auditlib import auditctl
from statistics import mean, stdev

NUM_CPUS = 4

""" SYSCALL_MAP = {
    'getpid': '0',
    'mmap2' : '1',
    'write' : '2',
    'read' : '3',
    'close' : '4',
    'openat' : '5',
    'connect' : '6',
    'mprotect' : '7',
    'fstat64' : '8',
    'llseek' : '9',
    'pread64' : '10'
} """

SYSCALL_MAP = {
    'getpid': '0',
    'getppid' : '1',
    'getgid32' : '2',
    'getuid32' : '3',
    'getpgrp' : '4'
}
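
# The map values are the selector strings passed on bin/microbench's command
# line (see run_microbenchmark); presumably they index the benchmark binary's
# own syscall table, so they must be kept in sync with it.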

# Pin every CPU's cpufreq governor to "performance" so frequency scaling does
# not distort the timing measurements (writing the governor requires root).
def setup_perf_mode():
    for i in range(NUM_CPUS):
        with open('/sys/devices/system/cpu/cpu{}/cpufreq/scaling_governor'.format(i), 'w') as f:
            f.write("performance")
    time.sleep(1)
    for i in range(NUM_CPUS):
        with open('/sys/devices/system/cpu/cpu{}/cpufreq/scaling_governor'.format(i), 'r') as f:
            print("CPU {} scaling governor: {}".format(i, f.read().strip()))

def read_audit_times_from_file(filename):
    # Parse the benchmark's per-call latencies: the ninth whitespace-separated
    # field of each line is taken to be the latency in nanoseconds and is
    # converted to microseconds here.
    timestamps = []
    with open(filename, 'r') as audited_file:
        for line in audited_file:
            fields = line.split()
            if len(fields) >= 9:
                timestamps.append(int(fields[8]) / 1000.0)
    # Discard the first 10 measurements, which tend to be outliers due to cold
    # caches and other start-up effects.
    return timestamps[10:]

def run_microbenchmark(syscall,iterations,num_threads,output_file_prefix,rt_priority=True):

    print("Test parameters :\n syscall : {} \n iterations : {} \n num_threads : {} \n output_file_prefix : {} \n rt_priority : {}".format(
        syscall, iterations, num_threads, output_file_prefix, rt_priority))


    executable = os.path.join(os.getcwd(),"bin/microbench")
    #setup_perf_mode()

    # Set up audit rules for this syscall and executable before the audited run

    auditctl.clear_syscall_rules()
    auditctl.add_syscall_rule(syscall,executable)
    auditctl.set_buffer_size(10000)
    auditctl.enable_audit()
    auditctl.clear_audit_logs()
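
    # auditlib is not part of this file; as a rough sketch (assuming the wrapper
    # shells out to the standard auditctl(8) CLI), the calls above correspond to
    # something like:
    #   auditctl -D                                        # clear all rules
    #   auditctl -a always,exit -S <syscall> -F exe=<path> # audit this syscall for the benchmark binary
    #   auditctl -b 10000                                  # raise the kernel audit backlog limit
    #   auditctl -e 1                                      # enable auditing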

    with open(output_file_prefix + '-audited.log','w') as outfile:
        if rt_priority:
            subprocess.run(['chrt','--fifo',"99",executable,iterations,num_threads,SYSCALL_MAP[syscall]],stdout=outfile)
        else:
            subprocess.run([executable,iterations,num_threads,SYSCALL_MAP[syscall]],stdout=outfile)

    print("Sleeping to let the audit records be committed to log")
    time.sleep(5)
    auditctl.save_audit_logs(output_file_prefix + ".audit")
    auditctl.disable_audit()
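    # Again a sketch, assuming the wrapper copies the kernel audit log (typically
    # /var/log/audit/audit.log) to <prefix>.audit and then runs `auditctl -e 0`
    # so that the second run below executes without auditing.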

    with open(output_file_prefix + '-unaudited.log','w') as outfile:
        if rt_priority:
            subprocess.run(['chrt','--fifo',"99",executable,iterations,num_threads,SYSCALL_MAP[syscall]],stdout=outfile)
        else:
            subprocess.run([executable,iterations,num_threads,SYSCALL_MAP[syscall]],stdout=outfile)

    # Analyze the timing data from the audited and unaudited runs
    audited = read_audit_times_from_file(output_file_prefix + '-audited.log')
    unaudited = read_audit_times_from_file(output_file_prefix + '-unaudited.log')

    audited_mean,audited_stdev = mean(audited),stdev(audited)
    auditless_mean,auditless_stdev = mean(unaudited), stdev(unaudited)

    print("Statistics for run (microseconds) : min , max , mean , stdev")
    print("With audit ",len(audited),min(audited),max(audited),audited_mean,audited_stdev)
    print("Without audit ",len(unaudited),min(unaudited),max(unaudited),auditless_mean,auditless_stdev)
    print("Audit latency ",(audited_mean - auditless_mean))

    return

def main():
    if len(sys.argv) < 6:
        print("Usage: {} <syscall|all> <iterations> <num_threads> <rt_priority> <output_file_prefix>".format(sys.argv[0]))
        sys.exit(1)

    syscall = sys.argv[1]
    iterations = sys.argv[2]
    num_threads = sys.argv[3]
    rt_priority = (sys.argv[4] == 'True')
    output_file_prefix = sys.argv[5]

    if syscall == 'all':
        # For 'all', each syscall's own name is used as its output file prefix.
        for scall in SYSCALL_MAP:
            run_microbenchmark(scall, iterations, num_threads, scall, rt_priority)

    else:
        run_microbenchmark(syscall,iterations,num_threads,output_file_prefix,rt_priority)

if __name__ == "__main__":
    main()
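
# Example invocation with illustrative argument values (assumes bin/microbench
# has been built and the script is run as root so it can drive auditctl and,
# if enabled, set the cpufreq governor):
#   sudo python3 microbenchmark_runner.py getpid 100000 1 True results/getpid
# Passing 'all' as the syscall sweeps every entry in SYSCALL_MAP, using each
# syscall's name as its output file prefix.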