Merge pull request #738 from pytest-dev/multi-example-tables
commit f47c6d2fb5

@@ -7,6 +7,8 @@ Unreleased
- Text after the `#` character is no longer stripped from the Scenario and Feature names.
- Gherkin keyword aliases can now be used and are correctly reported in JSON and terminal output (see `Keywords <https://cucumber.io/docs/gherkin/reference/#keywords>`_ for the permitted list).
- Added localization support. The language of the feature file can be specified using the `# language: <language>` directive at the beginning of the file (see the sketch after this list).
- Multiple example tables are now supported for Scenario Outlines.
- Added filtering by tags against example tables.
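
For illustration only (not part of this diff): a minimal pytester-style sketch of the `# language: <language>` directive mentioned above, mirroring the test conventions used later in this change. It assumes the pytester plugin is enabled, as in pytest-bdd's own test suite; the feature text uses the official French Gherkin keywords, and the step definitions are deliberately omitted.

.. code-block:: python

    import textwrap


    def test_language_directive(pytester):
        # A feature file that opts into French Gherkin keywords via the
        # "# language:" directive on its first line.
        pytester.makefile(
            ".feature",
            localized=textwrap.dedent(
                """\
                # language: fr
                Fonctionnalité: Concombres
                    Scénario: Manger des concombres
                        Etant donné qu'il y a 5 concombres
                        Quand je mange 2 concombres
                        Alors il doit rester 3 concombres
                """
            ),
        )
        # Binding via scenarios("localized.feature") and the matching step
        # definitions are omitted from this sketch.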

8.0.0b2
----------


README.rst

@@ -514,6 +514,71 @@ Example:

        assert cucumbers["start"] - cucumbers["eat"] == left


Scenario Outlines with Multiple Example Tables
----------------------------------------------

In `pytest-bdd`, you can use multiple example tables in a Scenario Outline to test
different sets of input data under different conditions.
You can define separate `Examples` blocks, each with its own data table,
and optionally tag them to distinguish positive, negative, or any other kind of case.

Example:

.. code-block:: gherkin

    # content of scenario_outline.feature

    Feature: Scenario outlines with multiple example tables
        Scenario Outline: Outlined with multiple example tables
            Given there are <start> cucumbers
            When I eat <eat> cucumbers
            Then I should have <left> cucumbers

            @positive
            Examples: Positive results
            | start | eat | left |
            | 12    | 5   | 7    |
            | 5     | 4   | 1    |

            @negative
            Examples: Impossible negative results
            | start | eat | left |
            | 3     | 9   | -6   |
            | 1     | 4   | -3   |

.. code-block:: python

    from pytest_bdd import scenarios, given, when, then, parsers


    scenarios("scenario_outline.feature")


    @given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers")
    def given_cucumbers(start):
        return {"start": start, "eat": 0}


    @when(parsers.parse("I eat {eat:d} cucumbers"))
    def eat_cucumbers(cucumbers, eat):
        cucumbers["eat"] += eat


    @then(parsers.parse("I should have {left:d} cucumbers"))
    def should_have_left_cucumbers(cucumbers, left):
        assert cucumbers["start"] - cucumbers["eat"] == left

When you filter scenarios by a tag, only the examples associated with that tag will be executed.
This lets you run a specific subset of your test cases based on the tag.
For example, in the scenario outline above, if you filter by the `@positive` tag,
only the examples under the "Positive results" table will be executed,
and the table tagged `@negative` ("Impossible negative results") will be ignored.

.. code-block:: bash

    pytest -k "positive"


Datatables
----------

@@ -104,6 +104,7 @@ class Row:
@dataclass
class ExamplesTable:
    location: Location
    tags: list[Tag]
    name: str | None = None
    table_header: Row | None = None
    table_body: list[Row] | None = field(default_factory=list)

@@ -115,6 +116,7 @@ class ExamplesTable:
            name=data.get("name"),
            table_header=Row.from_dict(data["tableHeader"]) if data.get("tableHeader") else None,
            table_body=[Row.from_dict(row) for row in data.get("tableBody", [])],
            tags=[Tag.from_dict(tag) for tag in data["tags"]],
        )

@@ -22,6 +22,18 @@ from .types import STEP_TYPE_BY_PARSER_KEYWORD
STEP_PARAM_RE = re.compile(r"<(.+?)>")


def get_tag_names(tag_data: list[GherkinTag]) -> set[str]:
    """Extract tag names from tag data.

    Args:
        tag_data (List[dict]): The tag data to extract names from.

    Returns:
        set[str]: A set of tag names.
    """
    return {tag.name.lstrip("@") for tag in tag_data}


@dataclass(eq=False)
class Feature:
    """Represents a feature parsed from a feature file.
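
For illustration only (not part of the diff): a quick check of the `get_tag_names` helper above. It relies only on each tag object's `name` attribute, so a stand-in object is used here; the import path assumes the helper lands at module level in `pytest_bdd.parser`, as this hunk suggests.

.. code-block:: python

    from types import SimpleNamespace

    from pytest_bdd.parser import get_tag_names

    # Only the .name attribute is read; any leading "@" is stripped.
    tags = [SimpleNamespace(name="@positive"), SimpleNamespace(name="@negative")]
    assert get_tag_names(tags) == {"positive", "negative"}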

@@ -64,6 +76,7 @@ class Examples:
    name: str | None = None
    example_params: list[str] = field(default_factory=list)
    examples: list[Sequence[str]] = field(default_factory=list)
    tags: set[str] = field(default_factory=set)

    def set_param_names(self, keys: Iterable[str]) -> None:
        """Set the parameter names for the examples.

@@ -124,7 +137,7 @@ class ScenarioTemplate:
    description: str | None = None
    tags: set[str] = field(default_factory=set)
    _steps: list[Step] = field(init=False, default_factory=list)
    examples: Examples | None = field(default_factory=Examples)
    examples: list[Examples] = field(default_factory=list[Examples])

    def add_step(self, step: Step) -> None:
        """Add a step to the scenario.

@@ -327,18 +340,6 @@ class FeatureParser:
        self.rel_filename = os.path.join(os.path.basename(basedir), filename)
        self.encoding = encoding

    @staticmethod
    def get_tag_names(tag_data: list[GherkinTag]) -> set[str]:
        """Extract tag names from tag data.

        Args:
            tag_data (List[dict]): The tag data to extract names from.

        Returns:
            set[str]: A set of tag names.
        """
        return {tag.name.lstrip("@") for tag in tag_data}

    def parse_steps(self, steps_data: list[GherkinStep]) -> list[Step]:
        """Parse a list of step data into Step objects.

@@ -395,16 +396,18 @@ class FeatureParser:
            name=scenario_data.name,
            line_number=scenario_data.location.line,
            templated=templated,
            tags=self.get_tag_names(scenario_data.tags),
            tags=get_tag_names(scenario_data.tags),
            description=textwrap.dedent(scenario_data.description),
        )
        for step in self.parse_steps(scenario_data.steps):
            scenario.add_step(step)

        # Loop over multiple example tables if they exist
        for example_data in scenario_data.examples:
            examples = Examples(
                line_number=example_data.location.line,
                name=example_data.name,
                tags=get_tag_names(example_data.tags),
            )
            if example_data.table_header is not None:
                param_names = [cell.value for cell in example_data.table_header.cells]

@@ -413,7 +416,7 @@ class FeatureParser:
            for row in example_data.table_body:
                values = [cell.value or "" for cell in row.cells]
                examples.add_example(values)
            scenario.examples = examples
            scenario.examples.append(examples)

        return scenario
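
For context (illustrative, not part of the diff): roughly the objects this loop now builds for the two-table feature shown in the README hunk above. The field and method names come from the `Examples` dataclass in this change; the line numbers are made up.

.. code-block:: python

    from pytest_bdd.parser import Examples

    # One Examples object per table, each remembering its own tag names.
    positive = Examples(line_number=8, name="Positive results", tags={"positive"})
    positive.set_param_names(["start", "eat", "left"])
    positive.add_example(["12", "5", "7"])
    positive.add_example(["5", "4", "1"])

    negative = Examples(line_number=14, name="Impossible negative results", tags={"negative"})
    negative.set_param_names(["start", "eat", "left"])
    negative.add_example(["3", "9", "-6"])
    negative.add_example(["1", "4", "-3"])

    # ScenarioTemplate.examples is now a list collecting every table:
    # scenario.examples == [positive, negative]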

@@ -444,7 +447,7 @@ class FeatureParser:
            filename=self.abs_filename,
            rel_filename=self.rel_filename,
            name=feature_data.name,
            tags=self.get_tag_names(feature_data.tags),
            tags=get_tag_names(feature_data.tags),
            background=None,
            line_number=feature_data.location.line,
            description=textwrap.dedent(feature_data.description),

@@ -49,7 +49,7 @@ def _pytest_bdd_example() -> dict:

    If no outline is used, we just return an empty dict to render
    the current template without any actual variable.
    Otherwise pytest_bdd will add all the context variables in this fixture
    Otherwise, pytest_bdd will add all the context variables in this fixture
    from the example definitions in the feature file.
    """
    return {}

@@ -289,7 +289,7 @@ def _get_scenario_decorator(
                example_parametrizations,
            )(scenario_wrapper)

        for tag in templated_scenario.tags.union(feature.tags):
        for tag in templated_scenario.tags | feature.tags:
            config = CONFIG_STACK[-1]
            config.hook.pytest_bdd_apply_tag(tag=tag, function=scenario_wrapper)

@@ -303,12 +303,24 @@ def _get_scenario_decorator(
def collect_example_parametrizations(
    templated_scenario: ScenarioTemplate,
) -> list[ParameterSet] | None:
    if templated_scenario.examples is None:
        return None
    if contexts := list(templated_scenario.examples.as_contexts()):
        return [pytest.param(context, id="-".join(context.values())) for context in contexts]
    else:
        return None
    parametrizations = []

    for examples in templated_scenario.examples:
        tags: set = examples.tags or set()

        example_marks = [getattr(pytest.mark, tag) for tag in tags]

        for context in examples.as_contexts():
            param_id = "-".join(context.values())
            parametrizations.append(
                pytest.param(
                    context,
                    id=param_id,
                    marks=example_marks,
                ),
            )

    return parametrizations or None


def scenario(
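
For illustration only (not part of the diff): roughly the `ParameterSet` list the rewritten `collect_example_parametrizations` (its new body is the second half of the hunk above) produces for the two tables in the README example. Each row becomes a `pytest.param` whose id joins the row's cell values and whose marks are built from the table's tags, which is what makes tag-based selection work.

.. code-block:: python

    import pytest

    # Values are the raw table-cell strings; ids join them with "-";
    # marks come from the @positive / @negative tags on each table.
    expected = [
        pytest.param({"start": "12", "eat": "5", "left": "7"}, id="12-5-7",
                     marks=[pytest.mark.positive]),
        pytest.param({"start": "5", "eat": "4", "left": "1"}, id="5-4-1",
                     marks=[pytest.mark.positive]),
        pytest.param({"start": "3", "eat": "9", "left": "-6"}, id="3-9--6",
                     marks=[pytest.mark.negative]),
        pytest.param({"start": "1", "eat": "4", "left": "-3"}, id="1-4--3",
                     marks=[pytest.mark.negative]),
    ]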

@@ -79,6 +79,61 @@ def test_outlined(pytester):
    # fmt: on


def test_multiple_outlined(pytester):
    pytester.makefile(
        ".feature",
        outline_multi_example=textwrap.dedent(
            """\
            Feature: Outline With Multiple Examples
                Scenario Outline: Outlined given, when, thens with multiple examples tables
                    Given there are <start> cucumbers
                    When I eat <eat> cucumbers
                    Then I should have <left> cucumbers

                    @positive
                    Examples: Positive results
                    | start | eat | left |
                    | 12    | 5   | 7    |
                    | 5     | 4   | 1    |

                    @negative
                    Examples: Negative results
                    | start | eat | left |
                    | 3     | 9   | -6   |
                    | 1     | 4   | -3   |
            """
        ),
    )

    pytester.makeconftest(textwrap.dedent(STEPS))

    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import scenarios

            scenarios('outline_multi_example.feature')
            """
        )
    )
    result = pytester.runpytest("-s")
    result.assert_outcomes(passed=4)
    # fmt: off
    assert collect_dumped_objects(result) == [
        12, 5.0, "7",
        5, 4.0, "1",
        3, 9.0, "-6",
        1, 4.0, "-3",
    ]
    # fmt: on
    result = pytester.runpytest("-k", "positive", "-vv")
    result.assert_outcomes(passed=2, deselected=2)

    result = pytester.runpytest("-k", "positive or negative", "-vv")
    result.assert_outcomes(passed=4, deselected=0)


def test_unused_params(pytester):
    """Test parametrized scenario when the test function lacks parameters."""

@@ -163,6 +163,7 @@ def test_parser():
                ExamplesTable(
                    location=Location(column=5, line=26),
                    name="",
                    tags=[],
                    table_header=Row(
                        id="11",
                        location=Location(column=7, line=27),