2 #-------------------------------------------------------------------------------
3 # test/run_dwarfdump_tests.py
5 # Automatic test runner for elftools & llvm-dwarfdump-11
7 # Eli Bendersky (eliben@gmail.com)
8 # This code is in the public domain
9 #-------------------------------------------------------------------------------
import argparse
import logging
import os
import platform
import sys
import time
from difflib import SequenceMatcher
from multiprocessing import Pool

from utils import run_exe, is_in_rootdir, dump_output_to_temp_files
22 # Make it possible to run this file from the root dir of pyelftools without
23 # installing pyelftools; useful for CI testing, etc.
# Create a global logger object used by all tests in this runner; it logs
# everything (DEBUG and up) straight to stdout.
testlog = logging.getLogger('run_tests')
testlog.setLevel(logging.DEBUG)
testlog.addHandler(logging.StreamHandler(sys.stdout))
# Following the readelf example, we ship our own llvm-dwarfdump binary.
# Only Linux is supported; bail out early on other platforms.
if platform.system() == "Darwin":  # MacOS
    raise NotImplementedError("Not supported on MacOS")
elif platform.system() == "Windows":
    raise NotImplementedError("Not supported on Windows")

# Path (relative to the pyelftools root dir) of the reference dumper.
DWARFDUMP_PATH = 'test/external_tools/llvm-dwarfdump'
def discover_testfiles(rootdir):
    """ Discover test files in the given directory. Yield them one by one.

        Only files with an '.elf' extension are yielded; the corpus
        directory may contain auxiliary files that are not test inputs.
        (NOTE(review): the extension filter was reconstructed — `ext` was
        computed but unused in the original text; confirm '.elf' matches
        the contents of the test corpus directory.)
    """
    for filename in os.listdir(rootdir):
        _, ext = os.path.splitext(filename)
        if ext == '.elf':
            yield os.path.join(rootdir, filename)
def run_test_on_file(filename, verbose=False, opt=None):
    """ Runs a test on the given input filename. Return True if all test
        runs succeeded.

        If opt is specified, rather that going over the whole
        set of supported options, the test will only run with that single
        option.
    """
    success = True
    testlog.info("Test file '%s'" % filename)
    # NOTE(review): the default option set was reconstructed — confirm it
    # matches the full set of dwarfdump options this runner should exercise.
    options = ['--debug-info'] if opt is None else [opt]

    for option in options:
        if verbose: testlog.info("..option='%s'" % option)

        # stdouts will be a 2-element list: output of llvm-dwarfdump and output
        # of scripts/dwarfdump.py
        stdouts = []
        for exe_path in [DWARFDUMP_PATH, 'scripts/dwarfdump.py']:
            args = [option, '--verbose', filename]
            if verbose: testlog.info("....executing: '%s %s'" % (
                exe_path, ' '.join(args)))
            t1 = time.time()
            rc, stdout = run_exe(exe_path, args)
            if verbose: testlog.info("....elapsed: %s" % (time.time() - t1,))
            if rc != 0:
                # A non-zero exit from either tool means there is nothing
                # meaningful to compare; abort this file immediately.
                testlog.error("@@ aborting - '%s %s' returned '%s'" % (
                    exe_path, option, rc))
                return False
            stdouts.append(stdout)
        if verbose: testlog.info('....comparing output...')
        t1 = time.time()
        rc, errmsg = compare_output(*stdouts)
        if verbose: testlog.info("....elapsed: %s" % (time.time() - t1,))
        if rc:
            if verbose: testlog.info('.......................SUCCESS')
        else:
            # Keep going over the remaining options, but remember the failure
            # and dump both outputs to temp files for offline diffing.
            success = False
            testlog.info('.......................FAIL')
            testlog.info('....for file %s' % filename)
            testlog.info('....for option "%s"' % option)
            testlog.info('....Output #1 is llvm-dwarfdump, Output #2 is pyelftools')
            testlog.info('@@ ' + errmsg)
            dump_output_to_temp_files(testlog, filename, option, *stdouts)
    return success
def compare_output(s1, s2):
    """ Compare stdout strings s1 and s2.
        s1 is from llvm-dwarfdump, s2 from elftools dwarfdump.py
        Return pair success, errmsg. If comparison succeeds, success is True
        and errmsg is empty. Otherwise success is False and errmsg holds a
        description of the mismatch.
    """
    # llvm-dwarfdump sometimes adds a comment to addresses. We still haven't invested the
    # effort to understand exactly when. For now, removing the section comment helps us pass
    # the comparison.
    s1 = s1.replace('(0x0000000000000000 ".text")', '(0x0000000000000000)')

    def prepare_lines(s):
        # Case-insensitive comparison; empty lines are ignored.
        return [line for line in s.lower().splitlines() if line.strip() != '']

    lines1 = prepare_lines(s1)
    lines2 = prepare_lines(s2)

    if len(lines1) != len(lines2):
        return False, 'Number of lines different: %s vs %s' % (
                len(lines1), len(lines2))

    for (i, (line1, line2)) in enumerate(zip(lines1, lines2)):
        # Compare ignoring whitespace
        lines1_parts = line1.split()
        lines2_parts = line2.split()

        if ''.join(lines1_parts) != ''.join(lines2_parts):
            # Use SequenceMatcher opcodes to describe where the lines diverge,
            # which makes the failure log actionable.
            sm = SequenceMatcher()
            sm.set_seqs(lines1[i], lines2[i])
            changes = sm.get_opcodes()
            errmsg = 'Mismatch on line #%s:\n>>%s<<\n>>%s<<\n (%r)' % (
                i, line1, line2, changes)
            return False, errmsg
    return True, ''
def main():
    """ Command-line entry point: parse arguments, collect test files
        (explicit arguments or autodiscovery), run the tests serially or in
        parallel, and return a process exit code (0 on success, 1 on failure).
    """
    if not is_in_rootdir():
        testlog.error('Error: Please run me from the root dir of pyelftools!')
        return 1

    argparser = argparse.ArgumentParser(
        usage='usage: %(prog)s [options] [file] [file] ...',
        prog='run_dwarfdump_tests.py')
    argparser.add_argument('files', nargs='*', help='files to run tests on')
    argparser.add_argument(
        '--parallel', action='store_true',
        help='run tests in parallel; always runs all tests w/o verbose')
    argparser.add_argument('-V', '--verbose',
                           action='store_true', dest='verbose',
                           help='verbose output')
    argparser.add_argument(
        '-k', '--keep-going',
        action='store_true', dest='keep_going',
        help="Run all tests, don't stop at the first failure")
    argparser.add_argument('--opt',
                           action='store', dest='opt',
                           metavar='<dwarfdump-option>',
                           help='Limit the test to one dwarfdump option.')
    args = argparser.parse_args()

    if args.parallel:
        if args.verbose or args.keep_going == False:
            print('WARNING: parallel mode disables verbosity and always keeps going')

    if args.verbose:
        testlog.info('Running in verbose mode')
        testlog.info('Python executable = %s' % sys.executable)
        testlog.info('dwarfdump path = %s' % DWARFDUMP_PATH)
        testlog.info('Given list of files: %s' % args.files)

    # If file names are given as command-line arguments, only these files
    # are taken as inputs. Otherwise, autodiscovery is performed.
    if len(args.files) > 0:
        filenames = args.files
    else:
        filenames = sorted(discover_testfiles('test/testfiles_for_dwarfdump'))

    failures = 0
    if len(filenames) > 1 and args.parallel:
        # Parallel mode: fixed arguments only (no verbose/opt), always
        # keeps going; count failures from the pooled results.
        with Pool() as pool:
            results = pool.map(run_test_on_file, filenames)
        failures = results.count(False)
    else:
        for filename in filenames:
            if not run_test_on_file(filename, args.verbose, args.opt):
                failures += 1
                if not args.keep_going:
                    break

    if failures == 0:
        testlog.info('\nConclusion: SUCCESS')
        return 0
    elif args.keep_going:
        testlog.info('\nConclusion: FAIL ({}/{})'.format(
            failures, len(filenames)))
        return 1
    else:
        testlog.info('\nConclusion: FAIL')
        return 1


if __name__ == '__main__':
    sys.exit(main())