
tarantool / test-run / 5132620618

Build status: pending completion
Trigger: push (GitHub)

Commit by ylobankov: Prettify messages about log files

The ideas behind the change:

* The log is sometimes from a tarantool instance, but a luatest-based
  test may print logs from another process to stderr. Use the more
  general term 'log' to avoid confusing anyone.
* Replace the term 'instance' with 'test-run server' for the same
  reasons.
* Make the zero-size and missing-file situations more explicit (a rough
  sketch of this branching is given after the list).
* Catch and report the 'no logfile property' situation (it shouldn't
  occur, but was previously semi-handled by this code for some reason).
* Reduce code reuse for the sake of readability and to ease future
  modifications.
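
To make the bullets above concrete, here is a minimal, hypothetical sketch of the kind of log reporting the change describes. The function name, messages, and default tail length are illustrative placeholders, not the actual test-run code.

# Hypothetical sketch only: names and messages are placeholders.
import os

def report_log(log_path, lines=15):
    """Print the tail of a test-run server log, handling the corner
    cases listed above explicitly."""
    if log_path is None:
        # 'no logfile property': shouldn't occur, but report it anyway.
        print('The test-run server has no log file configured')
        return
    if not os.path.exists(log_path):
        print('The log file {} does not exist'.format(log_path))
        return
    if os.path.getsize(log_path) == 0:
        print('The log file {} is empty'.format(log_path))
        return
    with open(log_path) as f:
        tail = f.readlines()[-lines:]
    print('Last {} lines of the log file {}:'.format(len(tail), log_path))
    print(''.join(tail), end='')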

758 of 1554 branches covered (48.78%)

Branch coverage included in aggregate %.

22 of 22 new or added lines in 1 file covered. (100.0%)

2929 of 4328 relevant lines covered (67.68%)

0.68 hits per line
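
For reference, the branch and line percentages above are plain covered-to-total ratios; a quick check in Python:

print(round(758 / 1554 * 100, 2))   # 48.78 (branches)
print(round(2929 / 4328 * 100, 2))  # 67.68 (relevant lines)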

Source File

/lib/test.py: 50.73% covered

import filecmp
import gevent
import os
import pprint
import re
import shutil
import sys
import traceback
from functools import partial

from lib import Options
from lib.colorer import color_stdout
from lib.utils import assert_bytes
from lib.utils import non_empty_valgrind_logs
from lib.utils import print_tail_n
from lib.utils import print_unidiff as utils_print_unidiff
from lib.utils import safe_makedirs
from lib.utils import str_to_bytes
from lib import pytap13


class TestExecutionError(OSError):
    """To be raised when a test execution fails"""
    pass


class TestRunGreenlet(gevent.Greenlet):
    def __init__(self, green_callable, *args, **kwargs):
        self.callable = green_callable
        self.callable_args = args
        self.callable_kwargs = kwargs
        super(TestRunGreenlet, self).__init__()

    def _run(self, *args, **kwargs):
        self.callable(*self.callable_args, **self.callable_kwargs)

    def __repr__(self):
        return "<TestRunGreenlet at {0} info='{1}'>".format(
            hex(id(self)), getattr(self, "info", None))


class FilteredStream:
    """Helper class to filter .result file output"""
    def __init__(self, filename):
        self.stream = open(filename, "wb+")
        self.filters = []
        self.inspector = None

    def write_bytes(self, fragment):
        """ The same as ``write()``, but accepts ``<bytes>`` as
            input.
        """
        assert_bytes(fragment)
        skipped = False
        for line in fragment.splitlines(True):
            original_len = len(line.strip())
            for pattern, replacement in self.filters:
                line = re.sub(pattern, replacement, line)
                # don't write lines that are completely filtered out:
                skipped = original_len and not line.strip()
                if skipped:
                    break
            if not skipped:
                self.stream.write(line)

    def write(self, fragment):
        """ Apply all filters, then write result to the underlying
            stream.

            Do line-oriented filtering: the fragment doesn't have
            to represent just one line.

            Accepts ``<str>`` as input, just like the standard
            ``sys.stdout.write()``.
        """
        self.write_bytes(str_to_bytes(fragment))

    def push_filter(self, pattern, replacement):
        self.filters.append([str_to_bytes(pattern), str_to_bytes(replacement)])

    def pop_filter(self):
        self.filters.pop()

    def clear_all_filters(self):
        self.filters = []

    def close(self):
        self.clear_all_filters()
        self.stream.close()

    def flush(self):
        self.stream.flush()

    def fileno(self):
        """ May be used for direct writing. Discards any filters.
        """
        return self.stream.fileno()


def get_filename_by_test(postfix, test_name):
    """For <..>/<name>_test.* or <..>/<name>.test.* return <name> + postfix

    Examples:
        postfix='.result', test_name='foo/bar.test.lua' => return 'bar.result'
        postfix='.reject', test_name='bar_test.lua' => return 'bar.reject'
    """
    rg = re.compile(r'[._]test.*')
    return os.path.basename(rg.sub(postfix, test_name))


get_reject = partial(get_filename_by_test, '.reject')
get_result = partial(get_filename_by_test, '.result')
get_skipcond = partial(get_filename_by_test, '.skipcond')


class Test(object):
    """An individual test file. A test object can run itself
    and remembers completion state of the run.

    If a file <test_name>.skipcond exists, it will be executed before
    the test, and if it sets self.skip to a True value the test will
    be skipped.
    """

    def __init__(self, name, args, suite_ini, params={}, conf_name=None):
        """Initialize test properties: path to test file, path to
        temporary result file, path to the client program, test status."""
        self.name = name
        self.args = args
        self.suite_ini = suite_ini
        self.result = os.path.join(suite_ini['suite'], get_result(name))
        self.skip_cond = os.path.join(suite_ini['suite'], get_skipcond(name))
        self.tmp_result = os.path.join(suite_ini['vardir'], get_result(name))
        self.var_suite_path = os.path.join(suite_ini['vardir'], 'rejects',
                                           suite_ini['suite'])
        self.reject = os.path.join(self.var_suite_path, get_reject(name))
        self.is_executed = False
        self.is_executed_ok = None
        self.is_equal_result = None
        self.is_valgrind_clean = True
        self.is_terminated = False
        self.run_params = params
        self.conf_name = conf_name

        # filled in execute() when a greenlet runs
        self.current_test_greenlet = None

        # prevent double/triple reporting
        self.is_crash_reported = False

    @property
    def id(self):
        return self.name, self.conf_name

    def passed(self):
        """Return true if this test was run successfully."""
        return (self.is_executed and
                self.is_executed_ok and
                self.is_equal_result)

    def execute(self, server):
        # Note: don't forget to set 'server.current_test = self' in
        # inherited classes. Crash reporting relies on that.
        server.current_test = self
        # All the test runs must be isolated from each other on each worker.
        server.pretest_clean()

    def run(self, server):
        """ Execute the test assuming it's a python program.  If the test
            aborts, print its output to stdout, and raise an exception. Else,
            compare result and reject files.  If there is a difference, print
            it to stdout.

            Returns short status of the test as a string: 'skip', 'pass',
            'new', 'updated' or 'fail'.
            There is also one possible value for short_status, 'disabled',
            but it is returned in the caller, TestSuite.run_test().
        """

        # Note: the test was created before a certain worker became known,
        # so we need to update the temporary result directory here, as it
        # depends on 'vardir'.
        self.tmp_result = os.path.join(self.suite_ini['vardir'],
                                       os.path.basename(self.result))

        diagnostics = "unknown"
        save_stdout = sys.stdout
        try:
            self.skip = False
            if os.path.exists(self.skip_cond):
                sys.stdout = FilteredStream(self.tmp_result)
                stdout_fileno = sys.stdout.stream.fileno()
                new_globals = dict(locals(), **server.__dict__)
                with open(self.skip_cond, 'r') as f:
                    code = compile(f.read(), self.skip_cond, 'exec')
                    exec(code, new_globals)
                sys.stdout.close()
                sys.stdout = save_stdout
            if not self.skip:
                sys.stdout = FilteredStream(self.tmp_result)
                stdout_fileno = sys.stdout.stream.fileno()
                self.execute(server)
                sys.stdout.flush()
            self.is_executed_ok = True
        except TestExecutionError:
            self.is_executed_ok = False
        except Exception as e:
            if e.__class__.__name__ == 'TarantoolStartError':
                # worker should stop
                raise
            color_stdout('\nTest.run() received the following error:\n'
                         '{0}\n'.format(traceback.format_exc()),
                         schema='error')
            diagnostics = str(e)
        finally:
            if sys.stdout and sys.stdout != save_stdout:
                sys.stdout.close()
            sys.stdout = save_stdout
        self.is_executed = True
        sys.stdout.flush()

        is_tap = False
        if not self.skip:
            if not os.path.exists(self.tmp_result):
                self.is_executed_ok = False
                self.is_equal_result = False
            elif self.is_executed_ok and os.path.isfile(self.result):
                self.is_equal_result = filecmp.cmp(self.result,
                                                   self.tmp_result)
            elif self.is_executed_ok:
                if Options().args.is_verbose:
                    color_stdout('\n')
                    with open(self.tmp_result, 'r') as f:
                        color_stdout(f.read(), schema='log')
                is_tap, is_ok, is_skip = self.check_tap_output()
                self.is_equal_result = is_ok
                self.skip = is_skip
        else:
            self.is_equal_result = 1

        if self.args.valgrind:
            non_empty_logs = non_empty_valgrind_logs(
                server.current_valgrind_logs(for_test=True))
            self.is_valgrind_clean = not bool(non_empty_logs)

        short_status = None

        if self.skip:
            short_status = 'skip'
            color_stdout("[ skip ]\n", schema='test_skip')
            if os.path.exists(self.tmp_result):
                os.remove(self.tmp_result)
        elif (self.is_executed_ok and
              self.is_equal_result and
              self.is_valgrind_clean):
            short_status = 'pass'
            color_stdout("[ pass ]\n", schema='test_pass')
            if os.path.exists(self.tmp_result):
                os.remove(self.tmp_result)
        elif (self.is_executed_ok and
              not self.is_equal_result and
              not os.path.isfile(self.result) and
              not is_tap and
              Options().args.update_result):
            shutil.copy(self.tmp_result, self.result)
            short_status = 'new'
            color_stdout("[ new ]\n", schema='test_new')
        elif (self.is_executed_ok and
              not self.is_equal_result and
              os.path.isfile(self.result) and
              not is_tap and
              Options().args.update_result):
            shutil.copy(self.tmp_result, self.result)
            short_status = 'updated'
            color_stdout("[ updated ]\n", schema='test_new')
        else:
            has_result = os.path.exists(self.tmp_result)
            if has_result:
                safe_makedirs(self.var_suite_path)
                shutil.copy(self.tmp_result, self.reject)
            short_status = 'fail'
            color_stdout("[ fail ]\n", schema='test_fail')

            where = ""
            if not self.is_crash_reported and not has_result:
                color_stdout('\nCannot open %s\n' % self.tmp_result,
                             schema='error')
            elif not self.is_crash_reported and not self.is_executed_ok:
                self.print_diagnostics(self.reject,
                                       "Test failed! Output from reject file "
                                       "{0}:\n".format(self.reject))
                server.print_log(15)
                where = ": test execution aborted, reason " \
                        "'{0}'".format(diagnostics)
            elif not self.is_crash_reported and not self.is_equal_result:
                self.print_unidiff()
                server.print_log(15)
                where = ": wrong test output"
            elif not self.is_crash_reported and not self.is_valgrind_clean:
                os.remove(self.reject)
                for log_file in non_empty_logs:
                    self.print_diagnostics(log_file,
                                           "Test failed! Output from log file "
                                           "{0}:\n".format(log_file))
                where = ": there were warnings in the valgrind log file(s)"
        return short_status

    def print_diagnostics(self, log_file, message):
        """Print whole lines of client program output leading to test
        failure. Used to diagnose a failure of the client program"""

        color_stdout(message, schema='error')
        print_tail_n(log_file)

    def print_unidiff(self):
        """Print a unified diff between the .result and .reject files.
        Used to establish the cause of a failure when the test output
        differs from .result."""

        color_stdout("\nTest failed! Result content mismatch:\n",
                     schema='error')
        utils_print_unidiff(self.result, self.reject)

    def tap_parse_print_yaml(self, yml):
        if 'expected' in yml and 'got' in yml:
            color_stdout('Expected: %s\n' % yml['expected'], schema='error')
            color_stdout('Got:      %s\n' % yml['got'], schema='error')
            del yml['expected']
            del yml['got']
        if 'trace' in yml:
            color_stdout('Traceback:\n', schema='error')
            for fr in yml['trace']:
                fname = fr.get('name', '')
                if fname:
                    fname = " function '%s'" % fname
                line = '[%-4s]%s at <%s:%d>\n' % (
                    fr['what'], fname, fr['filename'], fr['line']
                )
                color_stdout(line, schema='error')
            del yml['trace']
        if 'filename' in yml:
            del yml['filename']
        if 'line' in yml:
            del yml['line']
        yaml_str = pprint.pformat(yml)
        color_stdout('\n', schema='error')
        if len(yml):
            for line in yaml_str.splitlines():
                color_stdout(line + '\n', schema='error')
            color_stdout('\n', schema='error')

    def check_tap_output(self):
        """ Returns is_tap, is_ok, is_skip """
        try:
            with open(self.tmp_result, 'r') as f:
                content = f.read()
            tap = pytap13.TAP13()
            tap.parse(content)
        except (ValueError, UnicodeDecodeError) as e:
            color_stdout('\nTAP13 parse failed (%s).\n' % str(e),
                         schema='error')
            color_stdout('\nNo result file (%s) found.\n' % self.result,
                         schema='error')
            if not Options().args.update_result:
                msg = 'Run the test with --update-result option to write the new result file.\n'
                color_stdout(msg, schema='error')
            self.is_crash_reported = True
            return False, False, False  # is_tap, is_ok, is_skip

        is_ok = True
        is_skip = False
        num_skipped_tests = 0
        for test_case in tap.tests:
            if test_case.directive == "SKIP":
                num_skipped_tests += 1
            if test_case.result == 'ok':
                continue
            if is_ok:
                color_stdout('\n')
            color_stdout('%s %s %s # %s %s\n' % (
                test_case.result,
                test_case.id or '',
                test_case.description or '-',
                test_case.directive or '',
                test_case.comment or ''), schema='error')
            if test_case.yaml:
                self.tap_parse_print_yaml(test_case.yaml)
            is_ok = False
        if not is_ok:
            color_stdout('Rejected result file: %s\n' % self.reject,
                         schema='test_var')
            self.is_crash_reported = True
        if num_skipped_tests == len(tap.tests):
            is_skip = True

        return True, is_ok, is_skip
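
As a brief orientation for readers of the file above: test output is recorded through FilteredStream, which applies push_filter() substitutions line by line before writing. A minimal usage sketch, assuming the classes above are imported; the path and pattern below are hypothetical:

stream = FilteredStream('/tmp/var/001_app/example.result')  # hypothetical path
stream.push_filter(r'/tmp/var/\d+_app', '<vardir>')
stream.write('log saved to /tmp/var/001_app/example.log\n')
# recorded as: log saved to <vardir>/example.log
stream.close()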