path: root/meta/lib/oeqa/runtime/cases/ptest.py
import os
import unittest
import pprint
import datetime

from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.oeid import OETestID
from oeqa.core.decorator.data import skipIfNotFeature
from oeqa.runtime.decorator.package import OEHasPackage
from oeqa.utils.logparser import PtestParser, Result
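
# This test case runs ptest-runner on the target image, saves the raw log
# under TEST_LOG_DIR, and reports each individual ptest result back through
# the OEQA framework's extraresults mechanism.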

class PtestRunnerTest(OERuntimeTestCase):

    # Parse a ptest-runner log into a Result object, keyed by section.
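    # The parser classifies section markers and per-test status lines; a
    # typical log looks roughly like this (illustrative excerpt, the package
    # path is hypothetical):
    #   BEGIN: /usr/lib/example/ptest
    #   PASS: test-one
    #   FAIL: test-two
    #   SKIP: test-three
    #   END: /usr/lib/example/ptest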
    def parse_ptest(self, logfile):
        parser = PtestParser()
        result = Result()
        # Guard against test lines that appear before any section marker
        current_section = None

        with open(logfile, errors='replace') as f:
            for line in f:
                result_tuple = parser.parse_line(line)
                if not result_tuple:
                    continue
                line_type, category, status, name = result_tuple

                if line_type == 'section' and status == 'begin':
                    current_section = name
                    continue

                if line_type == 'section' and status == 'end':
                    current_section = None
                    continue

                if line_type == 'test' and status in ('pass', 'fail', 'skip'):
                    result.store(current_section, name, status)
                    continue

        result.sort_tests()
        return result

    @OETestID(1600)
    @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['ptest-runner'])
    @unittest.expectedFailure
    def test_ptestrunner(self):
        status, output = self.target.run('which ptest-runner', 0)
        if status != 0:
            self.skipTest("ptest-runner is not installed in the image")

        test_log_dir = self.td.get('TEST_LOG_DIR', '')
        # TEST_LOG_DIR may be empty when testimage is added after
        # testdata.json is generated.
        if not test_log_dir:
            test_log_dir = os.path.join(self.td.get('WORKDIR', ''), 'testimage')
        # Don't use self.td.get('DATETIME'): it comes from testdata.json, is
        # not up to date, and may cause "File exists" errors on re-run.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        ptest_log_dir_link = os.path.join(test_log_dir, 'ptest_log')
        ptest_log_dir = '%s.%s' % (ptest_log_dir_link, timestamp)
        ptest_runner_log = os.path.join(ptest_log_dir, 'ptest-runner.log')

        status, output = self.target.run('ptest-runner', 0)
        os.makedirs(ptest_log_dir)
        with open(ptest_runner_log, 'w') as f:
            f.write(output)

        # status != 0 is OK since some ptest tests may fail
        self.assertNotEqual(status, 127, msg="Cannot execute ptest-runner!")

        # Publish per-test results through the test context's extraresults,
        # which the OEQA framework merges into the exported test results.
        if not hasattr(self.tc, "extraresults"):
            self.tc.extraresults = {}
        extras = self.tc.extraresults
        extras['ptestresult.rawlogs'] = {'log': output}

        # Parse and save results
        parse_result = self.parse_ptest(ptest_runner_log)
        parse_result.log_as_files(ptest_log_dir, test_status=['pass', 'fail', 'skip'])
        if os.path.exists(ptest_log_dir_link):
            # Remove the old link to create a new one
            os.remove(ptest_log_dir_link)
        os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)

        trans = str.maketrans("()", "__")
        resmap = {'pass': 'PASSED', 'skip': 'SKIPPED', 'fail': 'FAILED'}
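        # For example (illustrative names), a 'fail' result for test
        # 'tst foo(bar)' in section 'glibc' becomes:
        #   extras['ptestresult.glibc.tst_foo_bar_'] = {'status': 'FAILED'}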
        for section in parse_result.result_dict:
            for test, result in parse_result.result_dict[section]:
                testname = "ptestresult." + (section or "No-section") + "." + "_".join(test.translate(trans).split())
                extras[testname] = {'status': resmap[result]}

        failed_tests = {}
        for section in parse_result.result_dict:
            failed_testcases = [ "_".join(test.translate(trans).split()) for test, result in parse_result.result_dict[section] if result == 'fail' ]
            if failed_testcases:
                failed_tests[section] = failed_testcases

        if failed_tests:
            self.fail("Failed ptests:\n%s" % pprint.pformat(failed_tests))