2014-10-12 20:49:36 +02:00
|
|
|
#!/usr/bin/env python2
|
|
|
|
#
|
|
|
|
# Parallel VM test case executor
|
2015-01-17 10:25:46 +01:00
|
|
|
# Copyright (c) 2014-2015, Jouni Malinen <j@w1.fi>
|
2014-10-12 20:49:36 +02:00
|
|
|
#
|
|
|
|
# This software may be distributed under the terms of the BSD license.
|
|
|
|
# See README for more details.
|
|
|
|
|
2014-10-19 09:37:02 +02:00
|
|
|
import curses
|
2014-10-12 20:49:36 +02:00
|
|
|
import fcntl
|
2014-12-24 10:40:03 +01:00
|
|
|
import logging
|
2014-10-12 20:49:36 +02:00
|
|
|
import os
|
|
|
|
import subprocess
|
|
|
|
import sys
|
|
|
|
import time
|
|
|
|
|
2014-12-24 10:40:03 +01:00
|
|
|
# Module-wide root logger; main() attaches a FileHandler ('parallel-vm.log')
# and sets the level, so all helpers log through this single object.
logger = logging.getLogger()
|
|
|
|
|
2015-03-06 22:47:34 +01:00
|
|
|
# Test cases that take significantly longer time to execute than average.
# NOTE: the order is significant — main() walks this list and moves each
# entry found in the selected test set to the front of the execution queue.
long_tests = [
    "ap_roam_open",
    "wpas_mesh_password_mismatch_retry",
    "wpas_mesh_password_mismatch",
    "hostapd_oom_wpa2_psk_connect",
    "ap_hs20_fetch_osu_stop",
    "ap_roam_wpa2_psk",
    "ibss_wpa_none_ccmp",
    "nfc_wps_er_handover_pk_hash_mismatch_sta",
    "go_neg_peers_force_diff_freq",
    "p2p_cli_invite",
    "sta_ap_scan_2b",
    "ap_pmf_sta_unprot_deauth_burst",
    "ap_bss_add_remove_during_ht_scan",
    "wext_scan_hidden",
    "autoscan_exponential",
    "nfc_p2p_client",
    "wnm_bss_keep_alive",
    "ap_inactivity_disconnect",
    "scan_bss_expiration_age",
    "autoscan_periodic",
    "discovery_group_client",
    "concurrent_p2pcli",
    "ap_bss_add_remove",
    "wpas_ap_wps",
    "wext_pmksa_cache",
    "ibss_wpa_none",
    "ap_ht_40mhz_intolerant_ap",
    "ibss_rsn",
    "discovery_pd_retries",
    "ap_wps_setup_locked_timeout",
    "ap_vht160",
    "dfs_radar",
    "dfs",
    "dfs_ht40_minus",
    "grpform_cred_ready_timeout",
    "hostapd_oom_wpa2_eap_connect",
    "wpas_ap_dfs",
    "autogo_many",
    "hostapd_oom_wpa2_eap",
    "ibss_open",
    "proxyarp_open_ebtables",
    "radius_failover",
    "obss_scan_40_intolerant",
    "dbus_connect_oom",
    "proxyarp_open",
    "ap_wps_iteration",
    "ap_wps_iteration_error",
    "ap_wps_pbc_timeout",
    "ap_wps_http_timeout",
    "p2p_go_move_reg_change",
    "p2p_go_move_active",
    "p2p_go_move_scm",
    "p2p_go_move_scm_peer_supports",
    "p2p_go_move_scm_peer_does_not_support",
    "p2p_go_move_scm_multi",
]
|
2015-03-06 22:47:34 +01:00
|
|
|
|
2014-12-24 16:34:29 +01:00
|
|
|
def get_failed(vm):
    """Return the names of all failed test cases collected so far.

    Concatenates the per-VM 'failed' lists in VM index order. The vm dict
    is keyed 0..n-1, so iterating over range(len(vm)) avoids the previous
    hidden dependency on the module-level num_servers global while
    preserving the original ordering.
    """
    failed = []
    for i in range(len(vm)):
        failed += vm[i]['failed']
    return failed
|
2014-12-24 15:06:37 +01:00
|
|
|
|
|
|
|
def vm_read_stdout(vm, i):
    """Consume available stdout from VM i and update the global counters.

    Reads whatever is currently available from the non-blocking stdout pipe
    of vm['proc'], buffers any partial line in vm['pending'], and interprets
    the per-test status lines (READY/PASS/FAIL/NOT-FOUND/SKIP/START) that
    the VM prints.

    Returns True when the VM has finished a test case (or reported READY)
    and can be given its next command, False otherwise.
    """
    global total_started, total_passed, total_failed, total_skipped

    ready = False
    try:
        out = vm['proc'].stdout.read()
    except (IOError, OSError):
        # Non-blocking read with no data available (EAGAIN); try again on
        # the next poll. (Narrowed from a bare except so KeyboardInterrupt
        # and the like are not swallowed.)
        return False
    logger.debug("VM[%d] stdout.read[%s]" % (i, out))
    pending = vm['pending'] + out
    while True:
        pos = pending.find('\n')
        if pos < 0:
            # Keep the trailing partial line for the next read.
            break
        line = pending[0:pos].rstrip()
        pending = pending[(pos + 1):]
        logger.debug("VM[%d] stdout full line[%s]" % (i, line))
        if line.startswith("READY"):
            ready = True
        elif line.startswith("PASS"):
            ready = True
            total_passed += 1
        elif line.startswith("FAIL"):
            ready = True
            total_failed += 1
            # Expected format: "FAIL <test-name> ..."; fall back to the
            # whole line if the name is missing.
            vals = line.split(' ')
            if len(vals) < 2:
                logger.info("VM[%d] incomplete FAIL line: %s" % (i, line))
                name = line
            else:
                name = vals[1]
            logger.debug("VM[%d] test case failed: %s" % (i, name))
            vm['failed'].append(name)
        elif line.startswith("NOT-FOUND"):
            ready = True
            total_failed += 1
            logger.info("VM[%d] test case not found" % i)
        elif line.startswith("SKIP"):
            ready = True
            total_skipped += 1
        elif line.startswith("START"):
            total_started += 1
            if len(vm['failed']) == 0:
                # Record the sequence of test cases run before the first
                # failure so the same sequence can be replayed later.
                vals = line.split(' ')
                if len(vals) >= 2:
                    vm['fail_seq'].append(vals[1])
        vm['out'] += line + '\n'
    vm['pending'] = pending
    return ready
|
|
|
|
|
2014-10-19 09:37:02 +02:00
|
|
|
def show_progress(scr):
    """Curses UI and VM scheduler loop (runs under curses.wrapper()).

    Drives all VM subprocesses through a first pass over the shared test
    queue and, when retries are enabled, a second pass that re-runs the
    failed test cases, while keeping a per-VM status line and the overall
    counters on screen.
    """
    global num_servers
    global vm
    global dir
    global timestamp
    global tests
    global first_run_failures
    global total_started, total_passed, total_failed, total_skipped

    total_tests = len(tests)
    logger.info("Total tests: %d" % total_tests)

    # Static screen layout: row 0 is the header, rows 1..num_servers show
    # per-VM status, row num_servers + 1 shows the overall totals.
    scr.leaveok(1)
    scr.addstr(0, 0, "Parallel test execution status", curses.A_BOLD)
    for i in range(0, num_servers):
        scr.addstr(i + 1, 0, "VM %d:" % (i + 1), curses.A_BOLD)
        scr.addstr(i + 1, 10, "starting VM")
    scr.addstr(num_servers + 1, 0, "Total:", curses.A_BOLD)
    scr.addstr(num_servers + 1, 20, "TOTAL={} STARTED=0 PASS=0 FAIL=0 SKIP=0".format(total_tests))
    scr.refresh()

    completed_first_pass = False
    # Failed test cases queued for the retry pass.
    rerun_tests = []

    while True:
        running = False
        first_running = False
        updated = False

        # First pass: hand out test cases from the shared queue until empty.
        for i in range(0, num_servers):
            if completed_first_pass:
                continue
            if vm[i]['first_run_done']:
                continue
            if not vm[i]['proc']:
                continue
            if vm[i]['proc'].poll() is not None:
                # The VM process exited during the first pass; inspect its
                # console log to distinguish a kernel panic from any other
                # unexpected exit.
                vm[i]['proc'] = None
                scr.move(i + 1, 10)
                scr.clrtoeol()
                log = '{}/{}.srv.{}/console'.format(dir, timestamp, i + 1)
                with open(log, 'r') as f:
                    if "Kernel panic" in f.read():
                        scr.addstr("kernel panic")
                        logger.info("VM[%d] kernel panic" % i)
                    else:
                        scr.addstr("unexpected exit")
                        logger.info("VM[%d] unexpected exit" % i)
                updated = True
                continue

            running = True
            first_running = True
            try:
                # stderr is in non-blocking mode (set up in main()); the
                # read raises when no data is available.
                err = vm[i]['proc'].stderr.read()
                vm[i]['err'] += err
                logger.debug("VM[%d] stderr.read[%s]" % (i, err))
            except:
                pass

            if vm_read_stdout(vm[i], i):
                # The VM is ready for its next command: either another test
                # case from the queue, or first-round completion.
                scr.move(i + 1, 10)
                scr.clrtoeol()
                updated = True
                if not tests:
                    vm[i]['first_run_done'] = True
                    scr.addstr("completed first round")
                    logger.info("VM[%d] completed first round" % i)
                    continue
                else:
                    name = tests.pop(0)
                    vm[i]['proc'].stdin.write(name + '\n')
                    scr.addstr(name)
                    logger.debug("VM[%d] start test %s" % (i, name))

        if not first_running and not completed_first_pass:
            # Every VM has finished the first pass: collect the failures and
            # queue them for a retry (when retries are enabled).
            logger.info("First round of testing completed")
            if tests:
                logger.info("Unexpected test cases remaining from first round: " + str(tests))
                raise Exception("Unexpected test cases remaining from first round")
            completed_first_pass = True
            for name in get_failed(vm):
                if rerun_failures:
                    rerun_tests.append(name)
                first_run_failures.append(name)

        # Second pass: re-run the failed cases, then tell each idle VM to
        # shut down by sending an empty line on stdin.
        for i in range(num_servers):
            if not completed_first_pass:
                continue
            if not vm[i]['proc']:
                continue
            if vm[i]['proc'].poll() is not None:
                # The VM process exited; a clean shutdown shows up here as
                # "completed run".
                vm[i]['proc'] = None
                scr.move(i + 1, 10)
                scr.clrtoeol()
                log = '{}/{}.srv.{}/console'.format(dir, timestamp, i + 1)
                with open(log, 'r') as f:
                    if "Kernel panic" in f.read():
                        scr.addstr("kernel panic")
                        logger.info("VM[%d] kernel panic" % i)
                    else:
                        scr.addstr("completed run")
                        logger.info("VM[%d] completed run" % i)
                updated = True
                continue

            running = True
            try:
                err = vm[i]['proc'].stderr.read()
                vm[i]['err'] += err
                logger.debug("VM[%d] stderr.read[%s]" % (i, err))
            except:
                pass

            ready = False
            if vm[i]['first_run_done']:
                # Transition from first pass to retry pass: the VM already
                # reported ready at the end of the first round.
                vm[i]['first_run_done'] = False
                ready = True
            else:
                ready = vm_read_stdout(vm[i], i)
            if ready:
                scr.move(i + 1, 10)
                scr.clrtoeol()
                updated = True
                if not rerun_tests:
                    vm[i]['proc'].stdin.write('\n')
                    scr.addstr("shutting down")
                    logger.info("VM[%d] shutting down" % i)
                else:
                    # "(*)" marks a retried test case on the status line.
                    name = rerun_tests.pop(0)
                    vm[i]['proc'].stdin.write(name + '\n')
                    scr.addstr(name + "(*)")
                    logger.debug("VM[%d] start test %s (*)" % (i, name))

        if not running:
            # All VM processes have exited; leave the UI loop.
            break

        if updated:
            # Refresh the totals row, the failed-case summary (capped at 30
            # names) and the retry indicator.
            scr.move(num_servers + 1, 10)
            scr.clrtoeol()
            scr.addstr("{} %".format(int(100.0 * (total_passed + total_failed + total_skipped) / total_tests)))
            scr.addstr(num_servers + 1, 20, "TOTAL={} STARTED={} PASS={} FAIL={} SKIP={}".format(total_tests, total_started, total_passed, total_failed, total_skipped))
            failed = get_failed(vm)
            if len(failed) > 0:
                scr.move(num_servers + 2, 0)
                scr.clrtoeol()
                scr.addstr("Failed test cases: ")
                count = 0
                for f in failed:
                    count += 1
                    if count > 30:
                        scr.addstr('...')
                        scr.clrtoeol()
                        break
                    scr.addstr(f)
                    scr.addstr(' ')

            scr.move(0, 35)
            scr.clrtoeol()
            if rerun_tests:
                scr.addstr("(RETRY FAILED %d)" % len(rerun_tests))
            elif rerun_failures:
                pass
            elif first_run_failures:
                scr.addstr("(RETRY FAILED)")

            scr.refresh()

        time.sleep(0.25)

    scr.refresh()
    time.sleep(0.3)
|
2014-10-12 20:49:36 +02:00
|
|
|
|
2014-10-19 09:37:02 +02:00
|
|
|
def main():
|
2015-03-03 23:08:40 +01:00
|
|
|
import argparse
|
2015-03-03 23:08:41 +01:00
|
|
|
import os
|
2014-10-19 09:37:02 +02:00
|
|
|
global num_servers
|
|
|
|
global vm
|
2014-11-16 21:34:54 +01:00
|
|
|
global dir
|
|
|
|
global timestamp
|
2014-11-19 01:03:39 +01:00
|
|
|
global tests
|
2014-12-23 21:25:29 +01:00
|
|
|
global first_run_failures
|
2014-12-24 16:34:29 +01:00
|
|
|
global total_started, total_passed, total_failed, total_skipped
|
2015-01-17 10:25:46 +01:00
|
|
|
global rerun_failures
|
2014-12-24 16:34:29 +01:00
|
|
|
|
|
|
|
total_started = 0
|
|
|
|
total_passed = 0
|
|
|
|
total_failed = 0
|
|
|
|
total_skipped = 0
|
2014-10-19 09:37:02 +02:00
|
|
|
|
2014-12-24 10:40:03 +01:00
|
|
|
debug_level = logging.INFO
|
2015-01-17 10:25:46 +01:00
|
|
|
rerun_failures = True
|
2014-12-19 23:51:55 +01:00
|
|
|
timestamp = int(time.time())
|
|
|
|
|
2015-03-03 23:08:41 +01:00
|
|
|
scriptsdir = os.path.dirname(os.path.realpath(sys.argv[0]))
|
|
|
|
|
2015-03-03 23:08:40 +01:00
|
|
|
p = argparse.ArgumentParser(description='run multiple testing VMs in parallel')
|
|
|
|
p.add_argument('num_servers', metavar='number of VMs', type=int, choices=range(1, 100),
|
|
|
|
help="number of VMs to start")
|
|
|
|
p.add_argument('-f', dest='testmodules', metavar='<test module>',
|
|
|
|
help='execute only tests from these test modules',
|
|
|
|
type=str, nargs='+')
|
|
|
|
p.add_argument('-1', dest='no_retry', action='store_const', const=True, default=False,
|
|
|
|
help="don't retry failed tests automatically")
|
|
|
|
p.add_argument('--debug', dest='debug', action='store_const', const=True, default=False,
|
|
|
|
help="enable debug logging")
|
|
|
|
p.add_argument('--codecov', dest='codecov', action='store_const', const=True, default=False,
|
|
|
|
help="enable code coverage collection")
|
|
|
|
p.add_argument('--shuffle-tests', dest='shuffle', action='store_const', const=True, default=False,
|
|
|
|
help="shuffle test cases to randomize order")
|
2015-03-06 22:47:34 +01:00
|
|
|
p.add_argument('--short', dest='short', action='store_const', const=True,
|
|
|
|
default=False,
|
|
|
|
help="only run short-duration test cases")
|
2015-03-03 23:08:40 +01:00
|
|
|
p.add_argument('--long', dest='long', action='store_const', const=True,
|
|
|
|
default=False,
|
|
|
|
help="include long-duration test cases")
|
2015-03-14 11:09:23 +01:00
|
|
|
p.add_argument('--valgrind', dest='valgrind', action='store_const',
|
|
|
|
const=True, default=False,
|
|
|
|
help="run tests under valgrind")
|
2015-03-03 23:08:40 +01:00
|
|
|
p.add_argument('params', nargs='*')
|
|
|
|
args = p.parse_args()
|
2015-11-24 17:39:58 +01:00
|
|
|
|
|
|
|
dir = os.environ.get('HWSIM_TEST_LOG_DIR', '/tmp/hwsim-test-logs')
|
|
|
|
try:
|
|
|
|
os.makedirs(dir)
|
|
|
|
except:
|
|
|
|
pass
|
|
|
|
|
2015-03-03 23:08:40 +01:00
|
|
|
num_servers = args.num_servers
|
|
|
|
rerun_failures = not args.no_retry
|
|
|
|
if args.debug:
|
2014-12-24 10:40:03 +01:00
|
|
|
debug_level = logging.DEBUG
|
2015-03-03 23:08:40 +01:00
|
|
|
extra_args = []
|
2015-03-14 11:09:23 +01:00
|
|
|
if args.valgrind:
|
|
|
|
extra_args += [ '--valgrind' ]
|
2015-03-03 23:08:40 +01:00
|
|
|
if args.long:
|
|
|
|
extra_args += [ '--long' ]
|
|
|
|
if args.codecov:
|
2014-12-19 23:51:55 +01:00
|
|
|
print "Code coverage - build separate binaries"
|
2015-11-24 17:39:58 +01:00
|
|
|
logdir = os.path.join(dir, str(timestamp))
|
2014-12-19 23:51:55 +01:00
|
|
|
os.makedirs(logdir)
|
2015-03-03 23:08:41 +01:00
|
|
|
subprocess.check_call([os.path.join(scriptsdir, 'build-codecov.sh'),
|
|
|
|
logdir])
|
2014-12-19 23:51:55 +01:00
|
|
|
codecov_args = ['--codecov_dir', logdir]
|
|
|
|
codecov = True
|
|
|
|
else:
|
|
|
|
codecov_args = []
|
|
|
|
codecov = False
|
|
|
|
|
2014-12-23 21:25:29 +01:00
|
|
|
first_run_failures = []
|
2015-03-14 11:12:01 +01:00
|
|
|
if args.params:
|
|
|
|
tests = args.params
|
|
|
|
else:
|
|
|
|
tests = []
|
|
|
|
cmd = [ os.path.join(os.path.dirname(scriptsdir), 'run-tests.py'),
|
|
|
|
'-L' ]
|
|
|
|
if args.testmodules:
|
|
|
|
cmd += [ "-f" ]
|
|
|
|
cmd += args.testmodules
|
|
|
|
lst = subprocess.Popen(cmd, stdout=subprocess.PIPE)
|
|
|
|
for l in lst.stdout.readlines():
|
|
|
|
name = l.split(' ')[0]
|
|
|
|
tests.append(name)
|
2014-11-19 01:03:39 +01:00
|
|
|
if len(tests) == 0:
|
|
|
|
sys.exit("No test cases selected")
|
|
|
|
|
2015-03-03 23:08:40 +01:00
|
|
|
if args.shuffle:
|
2015-03-03 08:47:03 +01:00
|
|
|
from random import shuffle
|
|
|
|
shuffle(tests)
|
|
|
|
elif num_servers > 2 and len(tests) > 100:
|
2014-12-21 17:20:15 +01:00
|
|
|
# Move test cases with long duration to the beginning as an
|
|
|
|
# optimization to avoid last part of the test execution running a long
|
|
|
|
# duration test case on a single VM while all other VMs have already
|
|
|
|
# completed their work.
|
2015-03-06 22:47:34 +01:00
|
|
|
for l in long_tests:
|
2014-12-21 17:20:15 +01:00
|
|
|
if l in tests:
|
|
|
|
tests.remove(l)
|
|
|
|
tests.insert(0, l)
|
2015-03-06 22:47:34 +01:00
|
|
|
if args.short:
|
|
|
|
tests = [t for t in tests if t not in long_tests]
|
2014-12-21 17:20:15 +01:00
|
|
|
|
2014-12-24 10:40:03 +01:00
|
|
|
logger.setLevel(debug_level)
|
|
|
|
log_handler = logging.FileHandler('parallel-vm.log')
|
|
|
|
log_handler.setLevel(debug_level)
|
|
|
|
fmt = "%(asctime)s %(levelname)s %(message)s"
|
|
|
|
log_formatter = logging.Formatter(fmt)
|
|
|
|
log_handler.setFormatter(log_formatter)
|
|
|
|
logger.addHandler(log_handler)
|
|
|
|
|
2014-10-19 09:37:02 +02:00
|
|
|
vm = {}
|
|
|
|
for i in range(0, num_servers):
|
|
|
|
print("\rStarting virtual machine {}/{}".format(i + 1, num_servers)),
|
2014-12-24 10:40:03 +01:00
|
|
|
logger.info("Starting virtual machine {}/{}".format(i + 1, num_servers))
|
2015-03-03 23:08:41 +01:00
|
|
|
cmd = [os.path.join(scriptsdir, 'vm-run.sh'), '--delay', str(i),
|
|
|
|
'--timestamp', str(timestamp),
|
2014-11-16 21:24:18 +01:00
|
|
|
'--ext', 'srv.%d' % (i + 1),
|
2014-12-19 23:51:55 +01:00
|
|
|
'-i'] + codecov_args + extra_args
|
2014-10-19 09:37:02 +02:00
|
|
|
vm[i] = {}
|
2014-12-23 21:25:29 +01:00
|
|
|
vm[i]['first_run_done'] = False
|
2014-10-19 09:37:02 +02:00
|
|
|
vm[i]['proc'] = subprocess.Popen(cmd,
|
|
|
|
stdin=subprocess.PIPE,
|
|
|
|
stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.PIPE)
|
|
|
|
vm[i]['out'] = ""
|
2014-12-24 15:06:37 +01:00
|
|
|
vm[i]['pending'] = ""
|
2014-10-19 09:37:02 +02:00
|
|
|
vm[i]['err'] = ""
|
2014-12-24 16:34:29 +01:00
|
|
|
vm[i]['failed'] = []
|
2015-06-18 19:44:59 +02:00
|
|
|
vm[i]['fail_seq'] = []
|
2014-10-19 09:37:02 +02:00
|
|
|
for stream in [ vm[i]['proc'].stdout, vm[i]['proc'].stderr ]:
|
|
|
|
fd = stream.fileno()
|
|
|
|
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
|
|
|
|
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
|
|
|
|
print
|
|
|
|
|
|
|
|
curses.wrapper(show_progress)
|
|
|
|
|
2014-10-12 20:49:36 +02:00
|
|
|
with open('{}/{}-parallel.log'.format(dir, timestamp), 'w') as f:
|
|
|
|
for i in range(0, num_servers):
|
|
|
|
f.write('VM {}\n{}\n{}\n'.format(i, vm[i]['out'], vm[i]['err']))
|
|
|
|
|
2014-12-24 16:34:29 +01:00
|
|
|
failed = get_failed(vm)
|
2014-10-12 20:49:36 +02:00
|
|
|
|
2014-12-23 21:25:29 +01:00
|
|
|
if first_run_failures:
|
2015-06-18 19:44:59 +02:00
|
|
|
print "To re-run same failure sequence(s):"
|
|
|
|
for i in range(0, num_servers):
|
|
|
|
if len(vm[i]['failed']) == 0:
|
|
|
|
continue
|
2015-11-30 18:42:56 +01:00
|
|
|
print "./vm-run.sh",
|
|
|
|
if args.long:
|
|
|
|
print "--long",
|
2015-06-18 19:44:59 +02:00
|
|
|
skip = len(vm[i]['fail_seq'])
|
|
|
|
skip -= min(skip, 30)
|
|
|
|
for t in vm[i]['fail_seq']:
|
|
|
|
if skip > 0:
|
|
|
|
skip -= 1
|
|
|
|
continue
|
|
|
|
print t,
|
|
|
|
print
|
2014-10-12 20:49:36 +02:00
|
|
|
print "Failed test cases:"
|
2014-12-23 21:25:29 +01:00
|
|
|
for f in first_run_failures:
|
|
|
|
print f,
|
2014-12-24 10:40:03 +01:00
|
|
|
logger.info("Failed: " + f)
|
2014-12-23 21:25:29 +01:00
|
|
|
print
|
|
|
|
double_failed = []
|
2014-12-24 16:34:29 +01:00
|
|
|
for name in failed:
|
2014-12-23 21:25:29 +01:00
|
|
|
double_failed.append(name)
|
|
|
|
for test in first_run_failures:
|
|
|
|
double_failed.remove(test)
|
2015-01-17 10:25:46 +01:00
|
|
|
if not rerun_failures:
|
|
|
|
pass
|
|
|
|
elif failed and not double_failed:
|
2014-12-23 21:25:29 +01:00
|
|
|
print "All failed cases passed on retry"
|
2014-12-24 10:40:03 +01:00
|
|
|
logger.info("All failed cases passed on retry")
|
2014-12-23 21:25:29 +01:00
|
|
|
elif double_failed:
|
|
|
|
print "Failed even on retry:"
|
|
|
|
for f in double_failed:
|
|
|
|
print f,
|
2014-12-24 10:40:03 +01:00
|
|
|
logger.info("Failed on retry: " + f)
|
2014-10-12 20:49:36 +02:00
|
|
|
print
|
2014-12-24 16:34:29 +01:00
|
|
|
res = "TOTAL={} PASS={} FAIL={} SKIP={}".format(total_started,
|
|
|
|
total_passed,
|
|
|
|
total_failed,
|
|
|
|
total_skipped)
|
2014-12-24 10:40:03 +01:00
|
|
|
print(res)
|
|
|
|
logger.info(res)
|
2014-11-19 01:03:39 +01:00
|
|
|
print "Logs: " + dir + '/' + str(timestamp)
|
2014-12-24 10:40:03 +01:00
|
|
|
logger.info("Logs: " + dir + '/' + str(timestamp))
|
2014-10-12 20:49:36 +02:00
|
|
|
|
2014-11-16 21:34:54 +01:00
|
|
|
for i in range(0, num_servers):
|
2014-12-24 15:06:37 +01:00
|
|
|
if len(vm[i]['pending']) > 0:
|
|
|
|
logger.info("Unprocessed stdout from VM[%d]: '%s'" %
|
|
|
|
(i, vm[i]['pending']))
|
2014-11-16 21:34:54 +01:00
|
|
|
log = '{}/{}.srv.{}/console'.format(dir, timestamp, i + 1)
|
|
|
|
with open(log, 'r') as f:
|
|
|
|
if "Kernel panic" in f.read():
|
|
|
|
print "Kernel panic in " + log
|
2014-12-24 10:40:03 +01:00
|
|
|
logger.info("Kernel panic in " + log)
|
2014-11-16 21:34:54 +01:00
|
|
|
|
2014-12-19 23:51:55 +01:00
|
|
|
if codecov:
|
|
|
|
print "Code coverage - preparing report"
|
|
|
|
for i in range(num_servers):
|
2015-03-03 23:08:41 +01:00
|
|
|
subprocess.check_call([os.path.join(scriptsdir,
|
|
|
|
'process-codecov.sh'),
|
2014-12-19 23:51:55 +01:00
|
|
|
logdir + ".srv.%d" % (i + 1),
|
|
|
|
str(i)])
|
2015-03-03 23:08:41 +01:00
|
|
|
subprocess.check_call([os.path.join(scriptsdir, 'combine-codecov.sh'),
|
|
|
|
logdir])
|
2014-12-19 23:51:55 +01:00
|
|
|
print "file://%s/index.html" % logdir
|
2014-12-24 10:40:03 +01:00
|
|
|
logger.info("Code coverage report: file://%s/index.html" % logdir)
|
2014-12-19 23:51:55 +01:00
|
|
|
|
2015-01-17 10:25:46 +01:00
|
|
|
if double_failed or (failed and not rerun_failures):
|
2014-12-24 10:40:03 +01:00
|
|
|
logger.info("Test run complete - failures found")
|
2014-12-23 21:25:29 +01:00
|
|
|
sys.exit(2)
|
|
|
|
if failed:
|
2014-12-24 10:40:03 +01:00
|
|
|
logger.info("Test run complete - failures found on first run; passed on retry")
|
2014-12-23 21:25:29 +01:00
|
|
|
sys.exit(1)
|
2014-12-24 10:40:03 +01:00
|
|
|
logger.info("Test run complete - no failures")
|
2014-12-23 21:25:29 +01:00
|
|
|
sys.exit(0)
|
|
|
|
|
2014-10-12 20:49:36 +02:00
|
|
|
# Run the parallel test executor when invoked as a script.
if __name__ == "__main__":
    main()
|