• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

mozilla / mozregression / 14310821897

07 Apr 2025 01:43PM CUT coverage: 35.021%. First build
14310821897

Pull #1960

github

web-flow
Merge 8d54d20c5 into 807564865
Pull Request #1960: build(deps): bump yarl from 1.9.4 to 1.19.0

989 of 2824 relevant lines covered (35.02%)

0.35 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

26.61
/mozregression/test_runner.py
1
"""
2
This module implements a :class:`TestRunner` interface for testing builds
3
and a default implementation :class:`ManualTestRunner`.
4
"""
5

6
from __future__ import absolute_import, print_function
1✔
7

8
import datetime
1✔
9
import os
1✔
10
import shlex
1✔
11
import subprocess
1✔
12
import sys
1✔
13
from abc import ABCMeta, abstractmethod
1✔
14

15
from mozlog import get_proxy_logger
1✔
16

17
from mozregression.errors import LauncherError, TestCommandError
1✔
18
from mozregression.launchers import create_launcher as mozlauncher
1✔
19

20
LOG = get_proxy_logger("Test Runner")
1✔
21

22

23
def create_launcher(build_info):
    """
    Create and returns a :class:`mozregression.launchers.Launcher`.
    """
    build_date = build_info.build_date
    if build_info.build_type == "nightly":
        # Nightly builds are described by their date, or by the full
        # buildid timestamp when we have a datetime instance.
        if isinstance(build_date, datetime.datetime):
            desc = "for buildid %s" % build_date.strftime("%Y%m%d%H%M%S")
        else:
            desc = "for %s" % build_date
    else:
        # Non-nightly (integration) builds are described by date + changeset.
        desc = "built on %s, revision %s" % (build_date, build_info.short_changeset)
    LOG.info("Running %s build %s" % (build_info.repo_name, desc))
    return mozlauncher(build_info)

41

42
class TestRunner(metaclass=ABCMeta):
1✔
43
    """
44
    Abstract class that allows to test a build.
45

46
    :meth:`evaluate` must be implemented by subclasses.
47
    """
48

49
    @abstractmethod
1✔
50
    def evaluate(self, build_info, allow_back=False):
1✔
51
        """
52
        Evaluate a given build. Must returns a tuple of (verdict, app_info).
53

54
        The verdict must be a letter that indicate the state of the build:
55
        'g', 'b', 's', 'r' or 'e' respectively for 'good', 'bad', 'skip',
56
        'retry' or 'exit'. If **allow_back** is True, it is also possible
57
        to return 'back'.
58

59
        The app_info is the return value of the
60
        :meth:`mozregression.launchers.Launcher.get_app_info` for this
61
        particular build.
62

63
        :param build_path: the path to the build file to test
64
        :param build_info: a :class:`mozrgression.uild_info.BuildInfo` instance
65
        :param allow_back: indicate if the back command should be proposed.
66
        """
67
        raise NotImplementedError
68

69
    @abstractmethod
1✔
70
    def run_once(self, build_info):
1✔
71
        """
72
        Run the given build and wait for its completion. Return the error
73
        code when available.
74
        """
75
        raise NotImplementedError
76

77
    def index_to_try_after_skip(self, build_range):
1✔
78
        """
79
        Return the index of the build to use after a build was skipped.
80

81
        By default this only returns the mid point of the remaining range.
82
        """
83
        return build_range.mid_point()
×
84

85

86
class ManualTestRunner(TestRunner):
    """
    A TestRunner subclass that runs builds and asks for an evaluation by
    prompting in the terminal.
    """

    def __init__(self, launcher_kwargs=None):
        TestRunner.__init__(self)
        # Extra keyword arguments forwarded to Launcher.start().
        self.launcher_kwargs = launcher_kwargs or {}

    def get_verdict(self, build_info, allow_back):
        """
        Ask and returns the verdict.

        Returns the first letter of the chosen option ('g', 'b', 's',
        'r' or 'e'), or the full string 'back' when that option was
        allowed and typed in full.
        """
        options = ["good", "bad", "skip", "retry", "exit"]
        if allow_back:
            options.insert(-1, "back")
        # allow user to just type one letter
        allowed_inputs = options + [o[0] for o in options]
        # format options to nice printing
        formatted_options = ", ".join(["'%s'" % o for o in options[:-1]]) + " or '%s'" % options[-1]
        verdict = ""
        while verdict not in allowed_inputs:
            verdict = input(
                "Was this %s build good, bad, or broken?"
                " (type %s and press Enter): " % (build_info.build_type, formatted_options)
            )

        if verdict == "back":
            return "back"
        # shorten verdict to one character for processing...
        # NOTE: typing just 'b' means 'bad'; 'back' must be typed in full.
        return verdict[0]

    def evaluate(self, build_info, allow_back=False):
        """
        Launch the build, ask the user for a verdict and return it.
        """
        with create_launcher(build_info) as launcher:
            launcher.start(**self.launcher_kwargs)
            build_info.update_from_app_info(launcher.get_app_info())
            verdict = self.get_verdict(build_info, allow_back)
            try:
                launcher.stop()
            except LauncherError:
                # we got an error on process termination, but user
                # already gave the verdict, so pass this "silently"
                # (it would be logged from the launcher anyway)
                launcher._running = False
        return verdict

    def run_once(self, build_info):
        """
        Launch the build and wait for its completion; return the error code.
        """
        with create_launcher(build_info) as launcher:
            launcher.start(**self.launcher_kwargs)
            build_info.update_from_app_info(launcher.get_app_info())
            return launcher.wait()

    def index_to_try_after_skip(self, build_range):
        """
        Ask the user which build to try next after a skip.

        The typed index is 0-based on the middle of the remaining range;
        when the range is too small to offer a choice, the mid point is
        returned directly.
        """
        mid = TestRunner.index_to_try_after_skip(self, build_range)
        build_range_len = len(build_range)
        if build_range_len <= 3:
            # do not even ask if there is only one build to choose
            return mid
        # renamed from min/max: do not shadow the builtins
        min_index = -mid + 1
        max_index = build_range_len - mid - 2
        valid_range = list(range(min_index, max_index + 1))
        print(
            "Build was skipped. You can manually choose a new build to"
            " test, to be able to get out of a broken build range."
        )
        print(
            "Please type the index of the build you would like to try - the"
            " index is 0-based on the middle of the remaining build range."
        )
        print("You can choose a build index between [%d, %d]:" % (min_index, max_index))
        while True:
            value = input("> ")
            try:
                index = int(value)
                if index in valid_range:
                    return mid + index
            except ValueError:
                pass
165

166

167
def _raise_command_error(exc, msg=""):
    # Wrap the low-level failure into a TestCommandError for callers.
    message = "Unable to run the test command%s: `%s`" % (msg, exc)
    raise TestCommandError(message)
169

170

171
class CommandTestRunner(TestRunner):
    """
    A TestRunner subclass that evaluate builds given a shell command.

    Some variables may be used to evaluate the builds:
     - variables referenced in :meth:`TestRunner.evaluate`
     - app_name (the tested application name: firefox, ...)
     - binary (the path to the binary when applicable - not for fennec)

    These variables can be used in two ways:
    1. as environment variables. 'MOZREGRESSION_' is prepended and the
       variables names are upcased. Example: MOZREGRESSION_BINARY
    2. as placeholders in the command line. variables names must be enclosed
       with curly brackets. Example:
       `mozmill -app firefox -b {binary} -t path/to/test.js`
    """

    def __init__(self, command):
        TestRunner.__init__(self)
        # Shell command template; may contain {variable} placeholders.
        self.command = command

    def evaluate(self, build_info, allow_back=False):
        """
        Run the test command against the given build.

        Returns 'g' (good) when the command exits with code 0, 'b' (bad)
        otherwise. **allow_back** is accepted for interface compatibility
        but is not used here.

        :raises TestCommandError: on a formatting error, an empty command,
                                  or a command that cannot be executed.
        """
        with create_launcher(build_info) as launcher:
            build_info.update_from_app_info(launcher.get_app_info())
            # copy, so adding 'binary' below does not mutate build_info data
            # (was a pointless identity dict comprehension)
            variables = dict(build_info.to_dict())
            if hasattr(launcher, "binary"):
                variables["binary"] = launcher.binary

            # expose every variable as MOZREGRESSION_<NAME> in the env
            env = dict(os.environ)
            for k, v in variables.items():
                env["MOZREGRESSION_" + k.upper()] = str(v)
            try:
                command = self.command.format(**variables)
            except KeyError as exc:
                # unknown {placeholder} in the command template
                _raise_command_error(exc, " (formatting error)")
            command = os.path.expanduser(command)
            LOG.info("Running test command: `%s`" % command)

            # `shlex.split` does parsing and escaping that isn't compatible with Windows.
            if sys.platform == "win32":
                cmdlist = command
            else:
                cmdlist = shlex.split(command)

            try:
                retcode = subprocess.call(cmdlist, env=env)
            except IndexError:
                _raise_command_error("Empty command")
            except OSError as exc:
                _raise_command_error(
                    exc,
                    " (%s not found or not executable)"
                    % (command if sys.platform == "win32" else cmdlist[0]),
                )
        LOG.info(
            "Test command result: %d (build is %s)" % (retcode, "good" if retcode == 0 else "bad")
        )
        return "g" if retcode == 0 else "b"

    def run_once(self, build_info):
        """
        Run the test command once; return 0 for a good build, 1 for bad.
        """
        return 0 if self.evaluate(build_info) == "g" else 1
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc