Support MIPS64 .o files - don't remove has_addend (#495)
[pyelftools.git] / test / run_dwarfdump_tests.py
1 #!/usr/bin/env python
2 #-------------------------------------------------------------------------------
3 # test/run_dwarfdump_tests.py
4 #
5 # Automatic test runner for elftools & llvm-dwarfdump-11
6 #
7 # Eli Bendersky (eliben@gmail.com)
8 # This code is in the public domain
9 #-------------------------------------------------------------------------------
10 import argparse
11 from difflib import SequenceMatcher
12 import logging
13 from multiprocessing import Pool
14 import os
15 import platform
16 import re
17 import sys
18 import time
19
20 from utils import run_exe, is_in_rootdir, dump_output_to_temp_files
21
# Make it possible to run this file from the root dir of pyelftools without
# installing pyelftools; useful for CI testing, etc.
sys.path[0:0] = ['.']

# Create a global logger object; DEBUG level, everything goes to stdout.
testlog = logging.getLogger('run_tests')
testlog.setLevel(logging.DEBUG)
testlog.addHandler(logging.StreamHandler(sys.stdout))

# Following the readelf example, we ship our own llvm-dwarfdump binary.
# Only Linux is supported here; importing this module on MacOS or Windows
# raises immediately.
if platform.system() == "Darwin": # MacOS
    raise NotImplementedError("Not supported on MacOS")
elif platform.system() == "Windows":
    raise NotImplementedError("Not supported on Windows")
else:
    DWARFDUMP_PATH = 'test/external_tools/llvm-dwarfdump'
38
def discover_testfiles(rootdir):
    """ Yield the full path of every '.elf' test file located directly
        inside rootdir, in directory-listing order.
    """
    yield from (
        os.path.join(rootdir, entry)
        for entry in os.listdir(rootdir)
        if os.path.splitext(entry)[1] == '.elf')
46
47
def run_test_on_file(filename, verbose=False, opt=None):
    """ Run the dwarfdump comparison test on a single input file.

        Returns True when every tested option succeeded, False otherwise.
        When opt is given, only that single dwarfdump option is exercised
        instead of the whole supported set.
    """
    success = True
    testlog.info("Test file '%s'" % filename)
    options = ['--debug-info'] if opt is None else [opt]

    for option in options:
        if verbose:
            testlog.info("..option='%s'" % option)

        # Collect both tools' output: index 0 is llvm-dwarfdump,
        # index 1 is scripts/dwarfdump.py.
        outputs = []
        for exe_path in (DWARFDUMP_PATH, 'scripts/dwarfdump.py'):
            args = [option, '--verbose', filename]
            if verbose:
                testlog.info("....executing: '%s %s'" % (
                    exe_path, ' '.join(args)))
            start = time.time()
            rc, stdout = run_exe(exe_path, args)
            if verbose:
                testlog.info("....elapsed: %s" % (time.time() - start,))
            if rc != 0:
                # A tool crash is fatal for the whole file, not just the
                # current option.
                testlog.error("@@ aborting - '%s %s' returned '%s'" % (exe_path, option, rc))
                return False
            outputs.append(stdout)

        if verbose:
            testlog.info('....comparing output...')
        start = time.time()
        rc, errmsg = compare_output(*outputs)
        if verbose:
            testlog.info("....elapsed: %s" % (time.time() - start,))
        if rc:
            if verbose:
                testlog.info('.......................SUCCESS')
        else:
            # Record the failure but keep testing the remaining options.
            success = False
            testlog.info('.......................FAIL')
            testlog.info('....for file %s' % filename)
            testlog.info('....for option "%s"' % option)
            testlog.info('....Output #1 is llvm-dwarfdump, Output #2 is pyelftools')
            testlog.info('@@ ' + errmsg)
            dump_output_to_temp_files(testlog, filename, option, *outputs)
    return success
96
97
def compare_output(s1, s2):
    """ Compare stdout strings s1 and s2.
        s1 is from llvm-dwarfdump, s2 from elftools dwarfdump.py.

        Returns a pair (success, errmsg): (True, '') when the outputs match,
        otherwise (False, <description of the first mismatch>). Comparison is
        case-insensitive, skips blank lines and ignores all whitespace within
        each line.
    """
    # llvm-dwarfdump sometimes adds a comment to addresses. We still haven't
    # invested the effort to understand exactly when. For now, removing the
    # section comment helps us pass the test.
    s1 = s1.replace('(0x0000000000000000 ".text")', '(0x0000000000000000)')

    def nonblank_lines(text):
        return [ln for ln in text.lower().splitlines() if ln.strip() != '']

    lines1 = nonblank_lines(s1)
    lines2 = nonblank_lines(s2)

    if len(lines1) != len(lines2):
        return False, 'Number of lines different: %s vs %s' % (
            len(lines1), len(lines2))

    for i, (line1, line2) in enumerate(zip(lines1, lines2)):
        # Collapse each line to its non-space characters so any difference
        # in spacing is ignored.
        if ''.join(line1.split()) != ''.join(line2.split()):
            sm = SequenceMatcher()
            sm.set_seqs(line1, line2)
            changes = sm.get_opcodes()
            errmsg = 'Mismatch on line #%s:\n>>%s<<\n>>%s<<\n (%r)' % (
                i, line1, line2, changes)
            return False, errmsg
    return True, ''
134
def main():
    """ Entry point: parse command-line arguments, run the dwarfdump
        comparison tests over the selected files and return a process exit
        code (0 on success, 1 on any failure or setup problem).
    """
    if not is_in_rootdir():
        testlog.error('Error: Please run me from the root dir of pyelftools!')
        return 1

    argparser = argparse.ArgumentParser(
        usage='usage: %(prog)s [options] [file] [file] ...',
        prog='run_dwarfdump_tests.py')
    argparser.add_argument('files', nargs='*', help='files to run tests on')
    argparser.add_argument(
        '--parallel', action='store_true',
        help='run tests in parallel; always runs all tests w/o verbose')
    argparser.add_argument('-V', '--verbose',
        action='store_true', dest='verbose',
        help='verbose output')
    argparser.add_argument(
        '-k', '--keep-going',
        action='store_true', dest='keep_going',
        help="Run all tests, don't stop at the first failure")
    argparser.add_argument('--opt',
        action='store', dest='opt', metavar='<dwarfdump-option>',
        help='Limit the test to one dwarfdump option.')
    args = argparser.parse_args()

    if args.parallel:
        # Parallel mode always keeps going and is never verbose; warn if the
        # user requested otherwise.
        if args.verbose or not args.keep_going:
            print('WARNING: parallel mode disables verbosity and always keeps going')

    if args.verbose:
        testlog.info('Running in verbose mode')
        testlog.info('Python executable = %s' % sys.executable)
        testlog.info('dwarfdump path = %s' % DWARFDUMP_PATH)
        testlog.info('Given list of files: %s' % args.files)

    # If file names are given as command-line arguments, only these files
    # are taken as inputs. Otherwise, autodiscovery is performed.
    if len(args.files) > 0:
        filenames = args.files
    else:
        filenames = sorted(discover_testfiles('test/testfiles_for_dwarfdump'))

    if len(filenames) > 1 and args.parallel:
        # pool.map passes only the filename, so --verbose and --opt are
        # effectively ignored in parallel mode (see the warning above).
        pool = Pool()
        results = pool.map(run_test_on_file, filenames)
        failures = results.count(False)
    else:
        failures = 0
        for filename in filenames:
            if not run_test_on_file(filename, args.verbose, args.opt):
                failures += 1
                if not args.keep_going:
                    break

    if failures == 0:
        testlog.info('\nConclusion: SUCCESS')
        return 0
    elif args.keep_going:
        testlog.info('\nConclusion: FAIL ({}/{})'.format(
            failures, len(filenames)))
        return 1
    else:
        testlog.info('\nConclusion: FAIL')
        return 1
198
199
# Script entry point: propagate main()'s return value as the process exit
# code so callers (e.g. CI) can detect failures.
if __name__ == '__main__':
    sys.exit(main())