path: root/scripts/lib/resulttool/manualexecution.py
# Test case management tool - manual execution from Testopia test cases
#
# Copyright (c) 2018, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
# more details.
#
import json
import os
import datetime
import re
from oeqa.core.runner import OETestResultJSONHelper


def load_json_file(f):
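    """Load and return the parsed contents of a JSON file."""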
    with open(f, "r") as filedata:
        return json.load(filedata)

def write_json_file(f, json_data):
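    """Write json_data to f as pretty-printed JSON, creating parent directories as needed."""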
    os.makedirs(os.path.dirname(f), exist_ok=True)
    with open(f, 'w') as filedata:
        filedata.write(json.dumps(json_data, sort_keys=True, indent=4))

class ManualTestRunner(object):
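    """Interactively walk a tester through manual test cases and collect the results."""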

    def _get_test_module(self, case_file):
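        """Derive the test module name from the case file name (basename up to the first dot)."""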
        return os.path.basename(case_file).split('.')[0]

    def _get_input(self, config):
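        """Prompt for a value until it contains only lowercase letters, digits, hyphens and dots."""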
        while True:
            output = input('{} = '.format(config))
            if re.match(r'^[a-z0-9.-]+$', output):
                break
            print('Only lowercase letters, digits, hyphens and dots are allowed. Please try again.')
        return output

    def _get_available_config_options(self, config_options, test_module, target_config):
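        """Return the predefined options for target_config in test_module, or None if there are none."""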
        avail_config_options = None
        if test_module in config_options:
            avail_config_options = config_options[test_module].get(target_config)
        return avail_config_options

    def _choose_config_option(self, options):
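        """Prompt for an index until it matches one of the given options, then return the selected value."""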
        while True:
            output = input('Option index number = ')
            if output in options:
                break
            print('Only the integer index numbers listed above are allowed. Please try again.')
        return options[output]

    def _get_config(self, config_options, test_module):
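        """Collect the configuration values for this run, prompting the user for any value that cannot be derived automatically."""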
        from oeqa.utils.metadata import get_layers
        from oeqa.utils.commands import get_bb_var
        from resulttool.resultutils import store_map

        layers = get_layers(get_bb_var('BBLAYERS'))
        configurations = {}
        configurations['LAYERS'] = layers
        configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        configurations['TEST_TYPE'] = 'manual'
        configurations['TEST_MODULE'] = test_module

        extra_config = set(store_map['manual']) - set(configurations)
        for config in sorted(extra_config):
            avail_config_options = self._get_available_config_options(config_options, test_module, config)
            if avail_config_options:
                print('---------------------------------------------')
                print('Available options for configuration #%s:' % config)
                print('---------------------------------------------')
                for option, value in sorted(avail_config_options.items(), key=lambda x: int(x[0])):
                    print('%s: %s' % (option, value))
                print('Please select a configuration option by entering its integer index number.')
                value_conf = self._choose_config_option(avail_config_options)
                print('---------------------------------------------\n')
            else:
                print('---------------------------------------------')
                print('This is configuration #%s. Please provide a configuration value (use "None" if not applicable).' % config)
                print('---------------------------------------------')
                value_conf = self._get_input('Configuration Value')
                print('---------------------------------------------\n')
            configurations[config] = value_conf
        return configurations

    def _execute_test_steps(self, case):
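        """Print the steps of a test case and prompt the user for the overall result."""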
        test_result = {}
        print('------------------------------------------------------------------------')
        print('Executing test case: %s' % case['test']['@alias'])
        print('------------------------------------------------------------------------')
        print('There are a total of %s test steps to be executed.' % len(case['test']['execution']))
        print('------------------------------------------------------------------------\n')
        for step, detail in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
            print('Step %s: %s' % (step, detail['action']))
            expected_output = detail['expected_results']
            if expected_output:
                print('Expected output: %s' % expected_output)
        while True:
            done = input('\nPlease provide the test result: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
            result_types = {'p': 'PASSED',
                            'f': 'FAILED',
                            'b': 'BLOCKED',
                            's': 'SKIPPED'}
            if done in result_types:
                res = result_types[done]
                if res == 'FAILED':
                    log_input = input('\nPlease enter the error and a description of the log: (e.g. log:211 Error Bitbake)\n')
                    test_result.update({case['test']['@alias']: {'status': res, 'log': log_input}})
                else:
                    test_result.update({case['test']['@alias']: {'status': res}})
                break
            print('Invalid input!')
        return test_result

    def _get_write_dir(self):
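        """Return the directory under BUILDDIR where manual test results are written."""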
        return os.path.join(os.environ['BUILDDIR'], 'tmp/log/manual/')

    def run_test(self, case_file, config_options_file, testcase_config_file):
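        """Execute all (or a selected subset of) the test cases in case_file and return the run configuration, result id, write directory and collected test results."""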
        test_module = self._get_test_module(case_file)
        cases = load_json_file(case_file)
        config_options = {}
        if config_options_file:
            config_options = load_json_file(config_options_file)
        configurations = self._get_config(config_options, test_module)
        result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
        test_results = {}
        if testcase_config_file:
            test_case_config = load_json_file(testcase_config_file)
            test_cases_to_execute = test_case_config['testcases']
            cases = [case for case in cases if case['test']['@alias'] in test_cases_to_execute]

        print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
        for c in cases:
            test_result = self._execute_test_steps(c)
            test_results.update(test_result)
        return configurations, result_id, self._get_write_dir(), test_results

    def _get_true_false_input(self, input_message):
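        """Prompt for a yes/no answer and return True for yes, False for no."""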
        yes_list = ['Y', 'YES']
        no_list = ['N', 'NO']
        while True:
            more_config_option = input(input_message).upper()
            if more_config_option in yes_list or more_config_option in no_list:
                break
            print('Invalid input!')
        return more_config_option in yes_list

    def make_config_option_file(self, logger, case_file, config_options_file):
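        """Interactively build a configuration options file for the test module of case_file."""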
        config_options = {}
        if config_options_file:
            config_options = load_json_file(config_options_file)
        new_test_module = self._get_test_module(case_file)
        print('Creating configuration options file for test module: %s' % new_test_module)
        new_config_options = {}

        while True:
            config_name = input('\nPlease provide the name of the test configuration to create:\n').upper()
            new_config_options[config_name] = {}
            while True:
                config_value = self._get_input('Configuration possible option value')
                config_option_index = len(new_config_options[config_name]) + 1
                new_config_options[config_name][config_option_index] = config_value
                more_config_option = self._get_true_false_input('\nIs there another option value to add: (Y)es/(N)o\n')
                if not more_config_option:
                    break
            more_config = self._get_true_false_input('\nIs there another configuration to create: (Y)es/(N)o\n')
            if not more_config:
                break

        if new_config_options:
            config_options[new_test_module] = new_config_options
        if not config_options_file:
            config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
        write_json_file(config_options_file, config_options)
        logger.info('Configuration option file created at %s' % config_options_file)

def manualexecution(args, logger):
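    """Entry point for the manualexecution subcommand."""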
    testrunner = ManualTestRunner()
    if args.make_config_options_file:
        testrunner.make_config_option_file(logger, args.file, args.config_options_file)
        return 0
    configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
    resultjsonhelper = OETestResultJSONHelper()
    resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('manualexecution', help='helper script for populating results during manual test execution.',
                                         description='helper script for populating results during manual test execution. You can find the manual test case JSON files in meta/lib/oeqa/manual/.',
                                         group='manualexecution')
    parser_build.set_defaults(func=manualexecution)
    parser_build.add_argument('file', help='specify the path to the manual test case JSON file. Note: please wrap the file path in "".')
    parser_build.add_argument('-c', '--config-options-file', default='',
                              help='the config options file to import and use for configuration option selection, or to extend when making a config options file')
    parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
                              help='make the configuration options file based on provided inputs')
    parser_build.add_argument('-t', '--testcase-config-file', default='',
                              help='the test case configuration file that enables the user to run a selected set of test cases')
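
# Example invocation (illustrative; the JSON file names below are hypothetical):
#   resulttool manualexecution "meta/lib/oeqa/manual/example-module.json"
#   resulttool manualexecution "meta/lib/oeqa/manual/example-module.json" -t testcases_to_run.json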