#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
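
"""Test runner infrastructure for the OEQA core framework: a logging
stream adapter, result classes that track timing, captured output and
extra results, a runner with test-listing support, and a helper that
writes results to testresults.json."""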

import os
import time
import unittest
import logging
import re
import json
import sys

from unittest import TextTestResult as _TestResult
from unittest import TextTestRunner as _TestRunner

class OEStreamLogger(object):
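    """File-like adapter that lets unittest's text runner write through a
    logging.Logger, buffering the partial '...' status lines the runner
    emits so that each test's output is logged as a single message."""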
    def __init__(self, logger):
        self.logger = logger
        self.buffer = ""

    def write(self, msg):
        if len(msg) > 1 and msg[0] != '\n':
            if '...' in msg:
                self.buffer += msg
            elif self.buffer:
                self.buffer += msg
                self.logger.log(logging.INFO, self.buffer)
                self.buffer = ""
            else:
                self.logger.log(logging.INFO, msg)

    def flush(self):
        for handler in self.logger.handlers:
            handler.flush()

class OETestResult(_TestResult):
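    """TextTestResult that additionally records successes, per-test start
    and end times, captured stdout/stderr and 'extraresults', so runs can
    be summarised, logged and exported to JSON."""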
    def __init__(self, tc, *args, **kwargs):
        super(OETestResult, self).__init__(*args, **kwargs)

        self.successes = []
        self.starttime = {}
        self.endtime = {}
        self.progressinfo = {}
        self.extraresults = {}

        # Inject into tc so that TestDepends decorator can see results
        tc.results = self

        self.tc = tc

        # stdout and stderr for each test case
        self.logged_output = {}

    def startTest(self, test):
        # May have been set by concurrencytest
        if test.id() not in self.starttime:
            self.starttime[test.id()] = time.time()
        super(OETestResult, self).startTest(test)

    def stopTest(self, test):
        self.endtime[test.id()] = time.time()
        if self.buffer:
            self.logged_output[test.id()] = (
                    sys.stdout.getvalue(), sys.stderr.getvalue())
        super(OETestResult, self).stopTest(test)
        if test.id() in self.progressinfo:
            self.tc.logger.info(self.progressinfo[test.id()])

        # Print the errors/failures early to aid/speed debugging; it's a pain
        # to wait until selftest finishes to see them.
        for t in ['failures', 'errors', 'skipped', 'expectedFailures']:
            for (scase, msg) in getattr(self, t):
                if test.id() == scase.id():
                    self.tc.logger.info(str(msg))
                    break

    def logSummary(self, component, context_msg=''):
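        """Log a summary line for the run (test count and elapsed time)
        plus an overall OK/FAIL verdict for the given component."""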
        elapsed_time = self.tc._run_end_time - self.tc._run_start_time
        self.tc.logger.info("SUMMARY:")
        self.tc.logger.info("%s (%s) - Ran %d test%s in %.3fs" % (component,
            context_msg, self.testsRun, self.testsRun != 1 and "s" or "",
            elapsed_time))

        if self.wasSuccessful():
            msg = "%s - OK - All required tests passed" % component
        else:
            msg = "%s - FAIL - Required tests failed" % component
        msg += " (successes=%d, skipped=%d, failures=%d, errors=%d)" % (len(self.successes), len(self.skipped), len(self.failures), len(self.errors))
        self.tc.logger.info(msg)

    def _getTestResultDetails(self, case):
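        """Return (status, message) for a case by searching the unittest
        result lists; setUpModule/setUpClass failures are reported by
        unittest under synthetic ids, so those are matched by string."""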
        result_types = {'failures': 'FAILED', 'errors': 'ERROR', 'skipped': 'SKIPPED',
                        'expectedFailures': 'EXPECTEDFAIL', 'successes': 'PASSED',
                        'unexpectedSuccesses': 'PASSED'}

        for rtype in result_types:
            found = False
            for resultclass in getattr(self, rtype):
                # unexpectedSuccesses is a plain list of cases, not a list
                # of (case, message) tuples
                if isinstance(resultclass, tuple):
                    scase, msg = resultclass
                else:
                    scase, msg = resultclass, None
                if case.id() == scase.id():
                    found = True
                    break
                scase_str = str(scase.id())

                # When a failure happens at module or class level, unittest
                # reports it under a synthetic id string, so check whether
                # that string matches this case's module or class.
                m = re.search(r"^setUpModule \((?P<module_name>.*)\).*$", scase_str)
                if m:
                    if case.__class__.__module__ == m.group('module_name'):
                        found = True
                        break

                m = re.search(r"^setUpClass \((?P<class_name>.*)\).*$", scase_str)
                if m:
                    class_name = "%s.%s" % (case.__class__.__module__,
                                            case.__class__.__name__)

                    if class_name == m.group('class_name'):
                        found = True
                        break

            if found:
                return result_types[rtype], msg

        return 'UNKNOWN', None

    def extractExtraResults(self, test, details = None):
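        """Merge any 'extraresults' attached to the test case (directly or
        via the optional details dict) into this result's extraresults."""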
        extraresults = None
        if details is not None and "extraresults" in details:
            extraresults = details.get("extraresults", {})
        elif hasattr(test, "extraresults"):
            extraresults = test.extraresults

        if extraresults is not None:
            for k, v in extraresults.items():
                # handle updating already existing entries (e.g. ptestresults.sections)
                if k in self.extraresults:
                    self.extraresults[k].update(v)
                else:
                    self.extraresults[k] = v

    def addError(self, test, *args, details = None):
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addError(test, *args)

    def addFailure(self, test, *args, details = None):
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addFailure(test, *args)

    def addSuccess(self, test, details = None):
        # Track successes too; the base result class doesn't record them
        self.successes.append((test, None))
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addSuccess(test)

    def addExpectedFailure(self, test, *args, details = None):
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addExpectedFailure(test, *args)

    def addUnexpectedSuccess(self, test, details = None):
        self.extractExtraResults(test, details = details)
        return super(OETestResult, self).addUnexpectedSuccess(test)

    def logDetails(self, json_file_dir=None, configuration=None, result_id=None,
            dump_streams=False):
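        """Log a RESULTS line per test, grouped by status, and optionally
        write everything to testresults.json in json_file_dir."""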
        self.tc.logger.info("RESULTS:")

        result = self.extraresults
        logs = {}
        if hasattr(self.tc, "extraresults"):
            result.update(self.tc.extraresults)

        for case_name in self.tc._registry['cases']:
            case = self.tc._registry['cases'][case_name]

            (status, log) = self._getTestResultDetails(case)

            t = ""
            if case.id() in self.starttime and case.id() in self.endtime:
                t = " (" + "{0:.2f}".format(self.endtime[case.id()] - self.starttime[case.id()]) + "s)"

            if status not in logs:
                logs[status] = []
            logs[status].append("RESULTS - %s: %s%s" % (case.id(), status, t))
            report = {'status': status}
            if log:
                report['log'] = log
            if dump_streams and case.id() in self.logged_output:
                (stdout, stderr) = self.logged_output[case.id()]
                report['stdout'] = stdout
                report['stderr'] = stderr
            result[case.id()] = report

        for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
            if i not in logs:
                continue
            for l in logs[i]:
                self.tc.logger.info(l)

        if json_file_dir:
            tresultjsonhelper = OETestResultJSONHelper()
            tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)

    def wasSuccessful(self):
        # Override, since unexpected successes aren't failures for us
        return (len(self.failures) == len(self.errors) == 0)

class OEListTestsResult(object):
    def wasSuccessful(self):
        return True

class OETestRunner(_TestRunner):
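    """TextTestRunner that streams output through the test context's
    logger, produces OETestResult instances, and can list the tests in a
    suite by name, class or module."""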
    streamLoggerClass = OEStreamLogger

    def __init__(self, tc, *args, **kwargs):
        kwargs['stream'] = self.streamLoggerClass(tc.logger)
        super(OETestRunner, self).__init__(*args, **kwargs)
        self.tc = tc
        self.resultclass = OETestResult

    def _makeResult(self):
        return self.resultclass(self.tc, self.stream, self.descriptions,
                self.verbosity)

    def _walk_suite(self, suite, func):
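        """Recursively apply func to every TestCase in suite, counting the
        visited cases in self._walked_cases."""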
        for obj in suite:
            if isinstance(obj, unittest.suite.TestSuite):
                if len(obj._tests):
                    self._walk_suite(obj, func)
            elif isinstance(obj, unittest.case.TestCase):
                func(self.tc.logger, obj)
                self._walked_cases += 1

    def _list_tests_name(self, suite):
        self._walked_cases = 0

        def _list_cases(logger, case):
            oetags = []
            if hasattr(case, '__oeqa_testtags'):
                oetags = getattr(case, '__oeqa_testtags')
            if oetags:
                logger.info("%s (%s)" % (case.id(), ",".join(oetags)))
            else:
                logger.info("%s" % (case.id()))

        self.tc.logger.info("Listing all available tests:")
        self._walked_cases = 0
        self.tc.logger.info("test (tags)")
        self.tc.logger.info("-" * 80)
        self._walk_suite(suite, _list_cases)
        self.tc.logger.info("-" * 80)
        self.tc.logger.info("Total found:\t%s" % self._walked_cases)

    def _list_tests_class(self, suite):
        self._walked_cases = 0

        curr = {}
        def _list_classes(logger, case):
            if 'module' not in curr or curr['module'] != case.__module__:
                curr['module'] = case.__module__
                logger.info(curr['module'])

            if 'class' not in curr or curr['class'] != \
                    case.__class__.__name__:
                curr['class'] = case.__class__.__name__
                logger.info(" -- %s" % curr['class'])

            logger.info(" -- -- %s" % case._testMethodName)

        self.tc.logger.info("Listing all available test classes:")
        self._walk_suite(suite, _list_classes)

    def _list_tests_module(self, suite):
        self._walked_cases = 0

        listed = []
        def _list_modules(logger, case):
            if case.__module__ not in listed:
                if case.__module__.startswith('_'):
                    logger.info("%s (hidden)" % case.__module__)
                else:
                    logger.info(case.__module__)
                listed.append(case.__module__)

        self.tc.logger.info("Listing all available test modules:")
        self._walk_suite(suite, _list_modules)

    def list_tests(self, suite, display_type):
        if display_type == 'name':
            self._list_tests_name(suite)
        elif display_type == 'class':
            self._list_tests_class(suite)
        elif display_type == 'module':
            self._list_tests_module(suite)

        return OEListTestsResult()

class OETestResultJSONHelper(object):
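    """Writes accumulated test results to a testresults.json file, merging
    with any existing results in the directory under a lock file so that
    concurrent writers don't clobber each other."""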

    testresult_filename = 'testresults.json'

    def _get_existing_testresults_if_available(self, write_dir):
        testresults = {}
        file = os.path.join(write_dir, self.testresult_filename)
        if os.path.exists(file):
            with open(file, "r") as f:
                testresults = json.load(f)
        return testresults

    def _write_file(self, write_dir, file_name, file_content):
        file_path = os.path.join(write_dir, file_name)
        with open(file_path, 'w') as the_file:
            the_file.write(file_content)

    def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
        # 'bb' is provided by the bitbake environment at runtime; import it
        # explicitly so the helper also works when used outside bitbake.
        import bb.utils
        bb.utils.mkdirhier(write_dir)
        lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
        test_results = self._get_existing_testresults_if_available(write_dir)
        test_results[result_id] = {'configuration': configuration, 'result': test_result}
        json_testresults = json.dumps(test_results, sort_keys=True, indent=4)
        self._write_file(write_dir, self.testresult_filename, json_testresults)
        bb.utils.unlockfile(lf)
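
# A minimal usage sketch for OETestResultJSONHelper (all values below are
# hypothetical; assumes bitbake's bb.utils is importable):
#
#   helper = OETestResultJSONHelper()
#   helper.dump_testresult_file(
#       write_dir="/tmp/oeqa-results",                   # hypothetical path
#       configuration={"MACHINE": "qemux86-64"},         # hypothetical config
#       result_id="example-run-1",                       # hypothetical id
#       test_result={"mod.Class.test_ok": {"status": "PASSED"}})
#
# The resulting testresults.json maps result_id to its 'configuration' and
# 'result' sections, merged with any earlier runs in the same directory.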