API documentation

algorithms

AlgorithmBase

Bases: ABC

Abstract base class for the algorithm implementations.

Source code in src/pytest_split/algorithms.py
class AlgorithmBase(ABC):
    """Abstract base class for the algorithm implementations."""

    @abstractmethod
    def __call__(
        self, splits: int, items: "List[nodes.Item]", durations: "Dict[str, float]"
    ) -> "List[TestGroup]":
        pass

    def __hash__(self) -> int:
        return hash(self.__class__.__name__)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, AlgorithmBase):
            return NotImplemented
        return self.__class__.__name__ == other.__class__.__name__
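
A custom splitting strategy can be sketched by subclassing AlgorithmBase. The round-robin splitter below is purely illustrative and not part of pytest-split; it reuses the module's names (List, nodes, TestGroup) and only assumes the TestGroup container (selected, deselected, duration) shown on this page.

# Illustrative only; assumes the imports of src/pytest_split/algorithms.py.
class RoundRobinAlgorithm(AlgorithmBase):
    """Toy example: deal items out one at a time, ignoring durations."""

    def __call__(
        self, splits: int, items: "List[nodes.Item]", durations: "Dict[str, float]"
    ) -> "List[TestGroup]":
        selected: List[List[nodes.Item]] = [[] for _ in range(splits)]
        for i, item in enumerate(items):
            selected[i % splits].append(item)
        return [
            TestGroup(
                selected=selected[i],
                # everything outside this group is deselected
                deselected=[it for it in items if it not in selected[i]],
                duration=0.0,
            )
            for i in range(splits)
        ]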

DurationBasedChunksAlgorithm

Bases: AlgorithmBase

Split tests into groups by runtime. Ensures tests are split into non-overlapping groups. The original list of test items is split into groups by finding boundary indices i_0, i_1, i_2 and creating group_1 = items[0:i_0], group_2 = items[i_0:i_1], group_3 = items[i_1:i_2], ...

:param splits: How many groups we're splitting into.
:param items: Test items passed down by Pytest.
:param durations: Our cached test runtimes. Assumed to contain timings only for relevant tests.
:return: List of TestGroup

Source code in src/pytest_split/algorithms.py
class DurationBasedChunksAlgorithm(AlgorithmBase):
    """
    Split tests into groups by runtime.
    Ensures tests are split into non-overlapping groups.
    The original list of test items is split into groups by finding boundary indices i_0, i_1, i_2
    and creating group_1 = items[0:i_0], group_2 = items[i_0:i_1], group_3 = items[i_1:i_2], ...

    :param splits: How many groups we're splitting into.
    :param items: Test items passed down by Pytest.
    :param durations: Our cached test runtimes. Assumed to contain timings only for relevant tests.
    :return: List of TestGroup
    """

    def __call__(
        self, splits: int, items: "List[nodes.Item]", durations: "Dict[str, float]"
    ) -> "List[TestGroup]":
        items_with_durations = _get_items_with_durations(items, durations)
        time_per_group = sum(map(itemgetter(1), items_with_durations)) / splits

        selected: List[List[nodes.Item]] = [[] for _ in range(splits)]
        deselected: List[List[nodes.Item]] = [[] for _ in range(splits)]
        duration: List[float] = [0.0 for _ in range(splits)]

        group_idx = 0
        for item, item_duration in items_with_durations:
            if duration[group_idx] >= time_per_group:
                group_idx += 1

            selected[group_idx].append(item)
            for i in range(splits):
                if i != group_idx:
                    deselected[i].append(item)
            duration[group_idx] += item_duration

        return [
            TestGroup(
                selected=selected[i], deselected=deselected[i], duration=duration[i]
            )
            for i in range(splits)
        ]
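
To see the boundary walk in isolation, here is a standalone sketch (not part of pytest-split) that uses (name, duration) pairs in place of real pytest items and mirrors the time_per_group threshold above. Note that the groups stay contiguous and non-overlapping, but they can end up unbalanced when a long test sits just before a boundary:

items = [("t1", 1.0), ("t2", 1.0), ("t3", 3.0), ("t4", 1.0)]
splits = 2
time_per_group = sum(d for _, d in items) / splits  # 3.0

groups: list = [[] for _ in range(splits)]
totals = [0.0] * splits
idx = 0
for name, d in items:
    if totals[idx] >= time_per_group:  # current group is "full", move on
        idx += 1
    groups[idx].append(name)
    totals[idx] += d

print(groups, totals)  # [['t1', 't2', 't3'], ['t4']] [5.0, 1.0]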

LeastDurationAlgorithm

Bases: AlgorithmBase

Split tests into groups by runtime. It walks the test items from largest to smallest duration, assigning each test to the group with the smallest current duration sum.

The algorithm sorts the items by their duration. Since the sorting algorithm is stable, ties are broken by maintaining the original order of items. It is therefore important that the order of items be identical on all nodes that use this plugin. Due to issue #25 this might not always be the case.

:param splits: How many groups we're splitting into.
:param items: Test items passed down by Pytest.
:param durations: Our cached test runtimes. Assumed to contain timings only for relevant tests.
:return: List of groups

Source code in src/pytest_split/algorithms.py
class LeastDurationAlgorithm(AlgorithmBase):
    """
    Split tests into groups by runtime.
    It walks the test items from largest to smallest duration, assigning each
    test to the group with the smallest current duration sum.

    The algorithm sorts the items by their duration. Since the sorting algorithm is stable, ties will be broken by
    maintaining the original order of items. It is therefore important that the order of items be identical on all nodes
    that use this plugin. Due to issue #25 this might not always be the case.

    :param splits: How many groups we're splitting into.
    :param items: Test items passed down by Pytest.
    :param durations: Our cached test runtimes. Assumed to contain timings only for relevant tests.
    :return: List of groups
    """

    def __call__(
        self, splits: int, items: "List[nodes.Item]", durations: "Dict[str, float]"
    ) -> "List[TestGroup]":
        items_with_durations = _get_items_with_durations(items, durations)

        # add index of item in list
        items_with_durations_indexed = [
            (*tup, i) for i, tup in enumerate(items_with_durations)
        ]

        # Sort by name to ensure it's always the same order
        items_with_durations_indexed = sorted(
            items_with_durations_indexed, key=lambda tup: str(tup[0])
        )

        # sort by duration in descending order (largest first)
        sorted_items_with_durations = sorted(
            items_with_durations_indexed, key=lambda tup: tup[1], reverse=True
        )

        selected: List[List[Tuple[nodes.Item, int]]] = [[] for _ in range(splits)]
        deselected: List[List[nodes.Item]] = [[] for _ in range(splits)]
        duration: List[float] = [0 for _ in range(splits)]

        # create a heap of the form (summed_durations, group_index)
        heap: List[Tuple[float, int]] = [(0, i) for i in range(splits)]
        heapq.heapify(heap)
        for item, item_duration, original_index in sorted_items_with_durations:
            # get group with smallest sum
            summed_durations, group_idx = heapq.heappop(heap)
            new_group_durations = summed_durations + item_duration

            # store assignment
            selected[group_idx].append((item, original_index))
            duration[group_idx] = new_group_durations
            for i in range(splits):
                if i != group_idx:
                    deselected[i].append(item)

            # store new duration - in case of ties it sorts by the group_idx
            heapq.heappush(heap, (new_group_durations, group_idx))

        groups = []
        for i in range(splits):
            # sort the items by their original index to maintain relative ordering
            # we don't care about the order of deselected items
            s = [
                item
                for item, original_index in sorted(selected[i], key=lambda tup: tup[1])
            ]
            group = TestGroup(
                selected=s, deselected=deselected[i], duration=duration[i]
            )
            groups.append(group)
        return groups
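
The heap drives a classic greedy "longest processing time first" assignment: at each step, the longest remaining test goes to the currently lightest group. A standalone sketch (not part of pytest-split), again with (name, duration) pairs in place of pytest items:

import heapq

tests = [("a", 5.0), ("b", 4.0), ("c", 3.0), ("d", 2.0), ("e", 1.0)]
splits = 2

heap = [(0.0, i) for i in range(splits)]  # (summed duration, group index)
heapq.heapify(heap)
groups: list = [[] for _ in range(splits)]

for name, d in sorted(tests, key=lambda t: t[1], reverse=True):
    total, idx = heapq.heappop(heap)  # group with the smallest sum so far
    groups[idx].append(name)
    heapq.heappush(heap, (total + d, idx))

print(groups)  # [['a', 'd', 'e'], ['b', 'c']] -> sums 8.0 and 7.0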

ipynb_compatibility

ensure_ipynb_compatibility(group: TestGroup, items: list) -> None

Ensures that group doesn't contain partial IPy notebook cells.

pytest-split might, in principle, break up the cells of an IPython notebook into different test groups, in which case the tests most likely fail (for starters, libraries are imported in Cell 0, so all subsequent calls to the imported libraries in the following cells will raise NameError).

Source code in src/pytest_split/ipynb_compatibility.py
def ensure_ipynb_compatibility(group: "TestGroup", items: list) -> None:  # type: ignore[type-arg]
    """
    Ensures that group doesn't contain partial IPy notebook cells.

    ``pytest-split`` might, in principle, break up the cells of an
    IPython notebook into different test groups, in which case the tests
    most likely fail (for starters, libraries are imported in Cell 0, so
    all subsequent calls to the imported libraries in the following cells
    will raise ``NameError``).

    """
    if not group.selected or not _is_ipy_notebook(group.selected[0].nodeid):
        return

    item_node_ids = [item.nodeid for item in items]

    # Deal with broken up notebooks at the beginning of the test group
    first = group.selected[0].nodeid
    siblings = _find_sibiling_ipynb_cells(first, item_node_ids)
    if first != siblings[0]:
        for item in list(group.selected):
            if item.nodeid in siblings:
                group.deselected.append(item)
                group.selected.remove(item)

    if not group.selected or not _is_ipy_notebook(group.selected[-1].nodeid):
        return

    # Deal with broken up notebooks at the end of the test group
    last = group.selected[-1].nodeid
    siblings = _find_sibiling_ipynb_cells(last, item_node_ids)
    if last != siblings[-1]:
        for item in list(group.deselected):
            if item.nodeid in siblings:
                group.deselected.remove(item)
                group.selected.append(item)
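
For intuition: a notebook runner (e.g. nbval) collects the cells of one notebook as consecutive sibling items whose node IDs share the notebook path, and the function above keeps such siblings in the same group. The IDs below are illustrative; the exact format depends on the runner:

# Hypothetical sibling node IDs for one notebook's cells; a group boundary
# falling between them would be moved so the notebook stays together.
siblings = [
    "notebooks/demo.ipynb::Cell 0",
    "notebooks/demo.ipynb::Cell 1",
    "notebooks/demo.ipynb::Cell 2",
]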

plugin

Base

Source code in src/pytest_split/plugin.py
class Base:
    def __init__(self, config: "Config") -> None:
        """
        Load durations and set up a terminal writer.

        This logic is shared for both the split- and cache plugin.
        """
        self.config = config
        self.writer = create_terminal_writer(self.config)

        try:
            with open(config.option.durations_path) as f:
                self.cached_durations = json.loads(f.read())
        except FileNotFoundError:
            self.cached_durations = {}

        # This code provides backwards compatibility after we switched
        # from saving durations in a list-of-lists to a dict format
        # Remove this when bumping to v1
        if isinstance(self.cached_durations, list):
            self.cached_durations = dict(self.cached_durations)

__init__(config: Config) -> None

Load durations and set up a terminal writer.

This logic is shared for both the split- and cache plugin.

Source code in src/pytest_split/plugin.py
def __init__(self, config: "Config") -> None:
    """
    Load durations and set up a terminal writer.

    This logic is shared for both the split- and cache plugin.
    """
    self.config = config
    self.writer = create_terminal_writer(self.config)

    try:
        with open(config.option.durations_path) as f:
            self.cached_durations = json.loads(f.read())
    except FileNotFoundError:
        self.cached_durations = {}

    # This code provides backwards compatibility after we switched
    # from saving durations in a list-of-lists to a dict format
    # Remove this when bumping to v1
    if isinstance(self.cached_durations, list):
        self.cached_durations = dict(self.cached_durations)
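
The durations file maps test node IDs to runtimes in seconds. Older plugin versions stored a list of [node_id, duration] pairs, which is exactly what the dict() call above converts. A sketch of both shapes (node IDs are illustrative):

# Current format: mapping of node ID -> duration in seconds.
new_format = {"test_foo.py::test_a": 0.12, "test_foo.py::test_b": 1.05}

# Legacy format: list of [node ID, duration] pairs; the
# backwards-compatibility branch converts it with dict().
old_format = [["test_foo.py::test_a", 0.12], ["test_foo.py::test_b", 1.05]]
assert dict(old_format) == new_format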

PytestSplitCachePlugin

Bases: Base

The cache plugin writes durations to our durations file.

Source code in src/pytest_split/plugin.py
class PytestSplitCachePlugin(Base):
    """
    The cache plugin writes durations to our durations file.
    """

    def pytest_sessionfinish(self) -> None:
        """
        This method is called by Pytest after the test suite has run.
        https://github.com/pytest-dev/pytest/blob/main/src/_pytest/main.py#L308
        """
        terminal_reporter = self.config.pluginmanager.get_plugin("terminalreporter")
        test_durations: Dict[str, float] = {}

        for test_reports in terminal_reporter.stats.values():  # type: ignore[union-attr]
            for test_report in test_reports:
                if isinstance(test_report, TestReport):
                    # These ifs can be removed once this is solved:
                    # https://github.com/spulec/freezegun/issues/286
                    if test_report.duration < 0:
                        continue  # pragma: no cover
                    if (
                        test_report.when in ("teardown", "setup")
                        and test_report.duration
                        > STORE_DURATIONS_SETUP_AND_TEARDOWN_THRESHOLD
                    ):
                        # Ignore implausibly long setup/teardown durations
                        continue  # pragma: no cover

                    # Add test durations to map
                    if test_report.nodeid not in test_durations:
                        test_durations[test_report.nodeid] = 0
                    test_durations[test_report.nodeid] += test_report.duration

        if self.config.option.clean_durations:
            self.cached_durations = dict(test_durations)
        else:
            for k, v in test_durations.items():
                self.cached_durations[k] = v

        with open(self.config.option.durations_path, "w") as f:
            json.dump(self.cached_durations, f, sort_keys=True, indent=4)

        message = self.writer.markup(
            f"\n\n[pytest-split] Stored test durations in {self.config.option.durations_path}"
        )
        self.writer.line(message)

pytest_sessionfinish() -> None

This method is called by Pytest after the test suite has run. https://github.com/pytest-dev/pytest/blob/main/src/_pytest/main.py#L308

Source code in src/pytest_split/plugin.py
def pytest_sessionfinish(self) -> None:
    """
    This method is called by Pytest after the test suite has run.
    https://github.com/pytest-dev/pytest/blob/main/src/_pytest/main.py#L308
    """
    terminal_reporter = self.config.pluginmanager.get_plugin("terminalreporter")
    test_durations: Dict[str, float] = {}

    for test_reports in terminal_reporter.stats.values():  # type: ignore[union-attr]
        for test_report in test_reports:
            if isinstance(test_report, TestReport):
                # These ifs can be removed once this is solved:
                # https://github.com/spulec/freezegun/issues/286
                if test_report.duration < 0:
                    continue  # pragma: no cover
                if (
                    test_report.when in ("teardown", "setup")
                    and test_report.duration
                    > STORE_DURATIONS_SETUP_AND_TEARDOWN_THRESHOLD
                ):
                    # Ignore implausibly long setup/teardown durations
                    continue  # pragma: no cover

                # Add test durations to map
                if test_report.nodeid not in test_durations:
                    test_durations[test_report.nodeid] = 0
                test_durations[test_report.nodeid] += test_report.duration

    if self.config.option.clean_durations:
        self.cached_durations = dict(test_durations)
    else:
        for k, v in test_durations.items():
            self.cached_durations[k] = v

    with open(self.config.option.durations_path, "w") as f:
        json.dump(self.cached_durations, f, sort_keys=True, indent=4)

    message = self.writer.markup(
        f"\n\n[pytest-split] Stored test durations in {self.config.option.durations_path}"
    )
    self.writer.line(message)
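
With sort_keys=True and indent=4, the stored .test_durations file is plain, diff-friendly JSON along these lines (node IDs and timings are illustrative):

{
    "test_foo.py::test_a": 0.0123,
    "test_foo.py::test_b": 1.4567
}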

PytestSplitPlugin

Bases: Base

Source code in src/pytest_split/plugin.py
class PytestSplitPlugin(Base):
    def __init__(self, config: "Config"):
        super().__init__(config)

        if not self.cached_durations:
            message = self.writer.markup(
                "\n[pytest-split] No test durations found. Pytest-split will "
                "split tests evenly when no durations are found. "
                "\n[pytest-split] You can expect better results in consequent runs, "
                "when test timings have been documented.\n"
            )
            self.writer.line(message)

    @hookimpl(trylast=True)
    def pytest_collection_modifyitems(
        self, config: "Config", items: "List[nodes.Item]"
    ) -> None:
        """
        Collect and select the tests we want to run, and deselect the rest.
        """
        splits: int = config.option.splits
        group_idx: int = config.option.group

        algo = algorithms.Algorithms[config.option.splitting_algorithm].value
        groups = algo(splits, items, self.cached_durations)
        group = groups[group_idx - 1]

        ensure_ipynb_compatibility(group, items)

        items[:] = group.selected
        config.hook.pytest_deselected(items=group.deselected)

        self.writer.line(
            self.writer.markup(
                f"\n\n[pytest-split] Splitting tests with algorithm: {config.option.splitting_algorithm}"
            )
        )
        self.writer.line(
            self.writer.markup(
                f"[pytest-split] Running group {group_idx}/{splits} (estimated duration: {group.duration:.2f}s)\n"
            )
        )

pytest_collection_modifyitems(config: Config, items: List[nodes.Item]) -> None

Collect and select the tests we want to run, and deselect the rest.

Source code in src/pytest_split/plugin.py
@hookimpl(trylast=True)
def pytest_collection_modifyitems(
    self, config: "Config", items: "List[nodes.Item]"
) -> None:
    """
    Collect and select the tests we want to run, and deselect the rest.
    """
    splits: int = config.option.splits
    group_idx: int = config.option.group

    algo = algorithms.Algorithms[config.option.splitting_algorithm].value
    groups = algo(splits, items, self.cached_durations)
    group = groups[group_idx - 1]

    ensure_ipynb_compatibility(group, items)

    items[:] = group.selected
    config.hook.pytest_deselected(items=group.deselected)

    self.writer.line(
        self.writer.markup(
            f"\n\n[pytest-split] Splitting tests with algorithm: {config.option.splitting_algorithm}"
        )
    )
    self.writer.line(
        self.writer.markup(
            f"[pytest-split] Running group {group_idx}/{splits} (estimated duration: {group.duration:.2f}s)\n"
        )
    )
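
In practice, each CI machine runs one group while passing the same --splits value. Below is a minimal programmatic equivalent of "pytest --splits 3 --group 1", using only the options declared in pytest_addoption:

import sys

import pytest

# Run the first of three groups; equivalent to:
#   pytest --splits 3 --group 1
sys.exit(pytest.main(["--splits", "3", "--group", "1"]))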

pytest_addoption(parser: Parser) -> None

Declare pytest-split's options.

Source code in src/pytest_split/plugin.py
def pytest_addoption(parser: "Parser") -> None:
    """
    Declare pytest-split's options.
    """
    group = parser.getgroup(
        "Split tests into groups which execution time is about the same. "
        "Run with --store-durations to store information about test execution times."
    )
    group.addoption(
        "--store-durations",
        dest="store_durations",
        action="store_true",
        help="Store durations into '--durations-path'.",
    )
    group.addoption(
        "--durations-path",
        dest="durations_path",
        help=(
            "Path to the file in which durations are (to be) stored, "
            "default is .test_durations in the current working directory"
        ),
        default=os.path.join(os.getcwd(), ".test_durations"),
    )
    group.addoption(
        "--splits",
        dest="splits",
        type=int,
        help="The number of groups to split the tests into",
    )
    group.addoption(
        "--group",
        dest="group",
        type=int,
        help="The group of tests that should be executed (first one is 1)",
    )
    group.addoption(
        "--splitting-algorithm",
        dest="splitting_algorithm",
        type=str,
        help=f"Algorithm used to split the tests. Choices: {algorithms.Algorithms.names()}",
        default="duration_based_chunks",
        choices=algorithms.Algorithms.names(),
    )
    group.addoption(
        "--clean-durations",
        dest="clean_durations",
        action="store_true",
        help=(
            "Removes the test duration info for tests which are not present "
            "while running the suite with '--store-durations'."
        ),
    )
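
Durations must be recorded before a duration-based split can do better than an even split. A minimal recording run via pytest.main, equivalent to passing the flag on the command line:

import pytest

# Record test timings into the default .test_durations file;
# equivalent to: pytest --store-durations
pytest.main(["--store-durations"])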

pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]

Validate options.

Source code in src/pytest_split/plugin.py
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config: "Config") -> "Optional[Union[int, ExitCode]]":
    """
    Validate options.
    """
    group = config.getoption("group")
    splits = config.getoption("splits")

    if splits is None and group is None:
        return None

    if splits and group is None:
        raise pytest.UsageError("argument `--group` is required")

    if group and splits is None:
        raise pytest.UsageError("argument `--splits` is required")

    if splits < 1:
        raise pytest.UsageError("argument `--splits` must be >= 1")

    if group < 1 or group > splits:
        raise pytest.UsageError(f"argument `--group` must be >= 1 and <= {splits}")

    return None
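
Concretely, the checks above reject these invocations (error messages as raised in the code):

pytest --splits 3              # UsageError: argument `--group` is required
pytest --group 1               # UsageError: argument `--splits` is required
pytest --splits 0 --group 1    # UsageError: argument `--splits` must be >= 1
pytest --splits 3 --group 4    # UsageError: argument `--group` must be >= 1 and <= 3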

pytest_configure(config: Config) -> None

Enable the plugins we need.

Source code in src/pytest_split/plugin.py
105
106
107
108
109
110
111
112
113
114
115
def pytest_configure(config: "Config") -> None:
    """
    Enable the plugins we need.
    """
    if config.option.splits and config.option.group:
        config.pluginmanager.register(PytestSplitPlugin(config), "pytestsplitplugin")

    if config.option.store_durations:
        config.pluginmanager.register(
            PytestSplitCachePlugin(config), "pytestsplitcacheplugin"
        )