Multitest

Basic

Initial Context

Required files:

test_plan.py

#!/usr/bin/env python
"""
    A Simple example to show how to access data from initial context passed
    to MultiTest constructor.

    Initial context should be a dictionary, which will be available in
      - Driver.context in driver instances, together with all the started drivers so far
      - env in testcases together with all the drivers
"""

from testplan import test_plan
from testplan.testing.multitest import testsuite, testcase, MultiTest
from testplan.testing.multitest.driver import Driver

TEST_CONTEXT_VALUE = "Data in context"

INITIAL_CONTEXT = {"test_value": TEST_CONTEXT_VALUE}


@testsuite
class SimpleSuite(object):
    @testcase
    def test_initial_context_access(self, env, result):
        """
        env in the testcase has the content of the initial context plus the drivers
        """
        result.equal(env.test_value, TEST_CONTEXT_VALUE)

    @testcase
    def test_driver_captured_data(self, env, result):
        """
        Just to validate the driver captured the data from context during its startup
        """
        # `env.context_user` is the ContextUser driver added to the
        # MultiTest environment below (name="context_user").
        result.equal(env.context_user.value_from_context, TEST_CONTEXT_VALUE)


class ContextUser(Driver):
    """
    A driver that shows how to access the initial context from a driver.
    Driver.context is prepopulated with the initial_context from the plan
    plus the drivers already started
    """

    def __init__(self, **options):
        super(ContextUser, self).__init__(**options)

        # Populated in `starting`; remains None until the driver starts.
        self.value_from_context = None

    def starting(self):
        # Runs on driver startup; by this point self.context already holds
        # the plan's initial_context entries.
        self.value_from_context = (
            self.context.test_value
        )  # just grab the value from self.context


@test_plan(name="Initial context example")
def main(plan):

    # The initial_context dict is exposed to testcases via `env` and to
    # drivers via `Driver.context` (see ContextUser above).
    plan.add(
        MultiTest(
            "Initial Context example",
            [SimpleSuite()],
            environment=[ContextUser(name="context_user")],
            initial_context=INITIAL_CONTEXT,
        )
    )


if __name__ == "__main__":
    import sys

    # Exit code 0 when main() returns a truthy (passing) result.
    sys.exit(not main())

Name Customization

Required files:

test_plan.py

#!/usr/bin/env python
"""
A Simple example to show how to customize name for testsuite and testcase.
"""

from testplan import test_plan
from testplan.testing.multitest import testsuite, testcase, MultiTest


def suite_name_func(cls_name, suite):
    """Build a customized testsuite name from the class name and ``suite.val``."""
    return f"{cls_name} -- {suite.val}"


def case_name_func(func_name, kwargs):
    """Build a parametrized testcase name like ``name (a+b=expected)``."""
    a, b, expected = kwargs["a"], kwargs["b"], kwargs["expected"]
    return f"{func_name} ({a}+{b}={expected})"


# In @testcase decorator, ``name`` should be a normal string, it can be
# used with ``name_func`` for parametrized testcases. Refer to examples
# "../../Parametrization/test_plan.py"


@testsuite(name="A Simple Suite")
class SimpleSuite(object):
    @testcase(name="A simple testcase")
    def test_example(self, env, result):
        # The customized testcase name is visible at runtime through
        # env.multitest_runtime_info.
        result.equal(
            env.multitest_runtime_info.testcase.name, "A simple testcase"
        )

    @testcase(
        name="Parametrized testcases",
        parameters=((1, 2, 3), (1, 0, 1)),
        name_func=case_name_func,
    )
    def test_equal(self, env, result, a, b, expected):
        result.equal(a + b, expected, description="Equality test")
        # The runtime testcase name should match what case_name_func produces
        # for the same parametrization kwargs.
        result.equal(
            env.multitest_runtime_info.testcase.name,
            case_name_func(
                "Parametrized testcases",
                {"a": a, "b": b, "expected": expected},
            ),
        )


# In @testsuite decorator, ``name`` can be a normal string or a callable
# receiving 2 arguments ``cls_name`` and ``suite``, the former is testsuite
# class name, and the latter is the instance of testsuite class. This can be
# used when multiple instances from the same testsuite class are added into
# one Multitest, and their names in the report can be made different.


@testsuite(name=suite_name_func)
class ComplicatedSuite(object):
    def __init__(self, val):
        # `val` is embedded in the generated suite name, so multiple
        # instances of this class get distinct names in the report.
        self.val = val

    @testcase(name="A testcase with one assertion")
    def test_less_than(self, env, result):
        result.less(self.val, 100, description="{} < 100".format(self.val))


# A multitest has one testsuite instance from ``SimpleSuite`` and 2 instances
# from ``ComplicatedSuite``.


@test_plan(name="Name customization example")
def main(plan):

    # Two ComplicatedSuite instances with different `val` values get distinct
    # report names via suite_name_func.
    plan.add(
        MultiTest(
            "Name customization example",
            suites=[SimpleSuite(), ComplicatedSuite(1), ComplicatedSuite(2)],
        )
    )


if __name__ == "__main__":
    import sys

    sys.exit(not main())

Listing

Basic

Required files:

test_plan_command_line.py

#!/usr/bin/env python
"""
    This example shows how the suites / test cases
    of a test plan can be listed via command line arguments.
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan import test_plan


@testsuite
class Alpha(object):
    # Untagged testcase.
    @testcase
    def test_a(self, env, result):
        pass

    # Simple tag, shortcut notation for {'simple': 'server'}.
    @testcase(tags="server")
    def test_b(self, env, result):
        pass

    # Named tag.
    @testcase(tags={"color": "blue"})
    def test_c(self, env, result):
        pass


# Suite tagged with `server`; testcases carry their own tags as well.
@testsuite(tags="server")
class Beta(object):
    @testcase(tags="client")
    def test_a(self, env, result):
        pass

    @testcase(tags={"color": "red"})
    def test_b(self, env, result):
        pass

    # A named tag may hold multiple values.
    @testcase(tags={"color": ("blue", "yellow")})
    def test_c(self, env, result):
        pass


@testsuite(tags="client")
class Gamma(object):
    @testcase
    def test_a(self, env, result):
        pass

    @testcase(tags={"color": ("yellow", "red")})
    def test_b(self, env, result):
        pass

    # 100 generated testcases -- large enough to trigger listing trimming.
    @testcase(parameters=list(range(100)))
    def test_c(self, env, result, val):
        pass


# Test plan accepts command line options for displaying test information.
# You can try running the current script with the sample arguments below
# to see how you can enable test listing via command line.

# Name listing (trims testcases per suite if they exceed a certain number):
# command line: `--info name`
# command line (shortcut): `--list`

# Sample output:

# Primary
# ..Alpha
# ....test_a
# ....test_b
# ...

# Name listing (without any testcase trimming):
# command line: `--info name-full`


# Pattern listing (trims testcases per suite if they exceed a certain number):
# command line `--info pattern`

# Sample output:

# Primary
# ..Primary::Alpha
# ....Primary::Alpha::test_a
# ....Primary::Alpha::test_b  --tags server
# ...

# Pattern listing (without any testcase trimming):
# command line `--info pattern-full`


# Count listing, just displays total number of suites / testcases per multitest.
# command line `--info count`

# Sample output:

# Primary: (2 suites, 6 testcases)
# Secondary: (1 suite, 102 testcases)


# Here are a couple of more examples that demonstrates how
# the listing operation takes test filters & sorters into account.

# `--info name --patterns Primary`
# `--info name --shuffle all`
# `--info name --shuffle all --patterns Primary`
# `--info pattern --patterns Primary --tags client color=blue`


@test_plan(name="Command Line Listing Example")
def main(plan):
    """Add the two example multitests; listing is driven by CLI options."""
    for test in (
        MultiTest(name="Primary", suites=[Alpha(), Beta()]),
        MultiTest(name="Secondary", suites=[Gamma()]),
    ):
        plan.add(test)


if __name__ == "__main__":
    sys.exit(not main())

test_plan_programmatic.py

#!/usr/bin/env python
"""
    This example shows how the suites / test cases
    of a test plan can be listed programmatically.
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.testing import listing, filtering


@testsuite
class Alpha(object):
    # Untagged testcase.
    @testcase
    def test_a(self, env, result):
        pass

    # Simple tag, shortcut notation for {'simple': 'server'}.
    @testcase(tags="server")
    def test_b(self, env, result):
        pass

    # Named tag.
    @testcase(tags={"color": "blue"})
    def test_c(self, env, result):
        pass


# Suite tagged with `server`; testcases carry their own tags as well.
@testsuite(tags="server")
class Beta(object):
    @testcase(tags="client")
    def test_a(self, env, result):
        pass

    @testcase(tags={"color": "red"})
    def test_b(self, env, result):
        pass

    # A named tag may hold multiple values.
    @testcase(tags={"color": ("blue", "yellow")})
    def test_c(self, env, result):
        pass


@testsuite(tags="client")
class Gamma(object):
    @testcase
    def test_a(self, env, result):
        pass

    @testcase(tags={"color": ("yellow", "red")})
    def test_b(self, env, result):
        pass

    # 100 generated testcases -- large enough to trigger listing trimming.
    @testcase(parameters=list(range(100)))
    def test_c(self, env, result, val):
        pass


# A test lister object prevents Testplan from running tests, but instead
# prints out information (list tests, counts etc) about your test setup.

# You can trigger this functionality by passing an instance of test lister
# as `test_lister` argument to `@test_plan' decorator.


# Default lister, lists by names
# Default lister, lists by names
name_lister = listing.NameLister()

# Sample output:

# Primary
# ..Alpha
# ....test_a
# ....test_b
# ...

# Like NameLister, but does not trim testcases. May produce
# large output in case of parametrization

expanded_name_lister = listing.ExpandedNameLister()


# Pattern lister, lists tests in a format that is compatible with
# `--patterns` / `--tags` / `--tags-all` arguments
pattern_lister = listing.PatternLister()

# Sample output:

# Primary
# ..Primary:Alpha
# ....Primary:Alpha:test_a
# ....Primary:Alpha:test_b  --tags server
# ...


# Like Pattern lister, but does not trim testcases. May produce
# large output in case of parametrization

expanded_pattern_lister = listing.ExpandedPatternLister()

# Count lister, just lists top level test instances with the number of
# suites & testcases.

count_lister = listing.CountLister()

# Sample output:

# Primary: (2 suites, 6 testcases)
# Secondary: (1 suite, 102 testcases)


# Here is a list of filters, you can pass them to
# the test plan declaration below and see how they change the
# test listing output.

pattern_filter_1 = filtering.Pattern("Primary")
pattern_filter_2 = filtering.Pattern("*:*:test_c")

tag_filter_1 = filtering.Tags("client")
tag_filter_2 = filtering.Tags({"color": "blue"})

# Filters compose with operators: `|` is OR, `&` is AND.
composite_filter_1 = pattern_filter_1 | pattern_filter_2
composite_filter_2 = (pattern_filter_1 & tag_filter_1) | tag_filter_2


@test_plan(
    name="Programmatic Listing Example",
    # You can replace this argument with the other listers defined above
    # (e.g. pattern_lister, count_lister) to see different output formats.
    test_lister=name_lister,
    # Comment out the arguments below to see how they affect the listing output.
    # test_filter=pattern_filter_1,
    # test_sorter=ordering.ShuffleSorter()
    # (the latter needs `from testplan.testing import ordering`)
)
def main(plan):

    multi_test_1 = MultiTest(name="Primary", suites=[Alpha(), Beta()])
    multi_test_2 = MultiTest(name="Secondary", suites=[Gamma()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    sys.exit(not main())

Custom

Required files:

test_plan.py

#!/usr/bin/env python
"""
    This example shows how to implement a custom lister for
    displaying test context of a test plan.
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.testing.listing import BaseLister


@testsuite
class Alpha(object):
    # Untagged testcase.
    @testcase
    def test_a(self, env, result):
        pass

    # Simple tag, shortcut notation for {'simple': 'server'}.
    @testcase(tags="server")
    def test_b(self, env, result):
        pass

    # Named tag.
    @testcase(tags={"color": "blue"})
    def test_c(self, env, result):
        pass


# Suite tagged with `server`; testcases carry their own tags as well.
@testsuite(tags="server")
class Beta(object):
    @testcase(tags="client")
    def test_a(self, env, result):
        pass

    @testcase(tags={"color": "red"})
    def test_b(self, env, result):
        pass

    # A named tag may hold multiple values.
    @testcase(tags={"color": ("blue", "yellow")})
    def test_c(self, env, result):
        pass


@testsuite(tags="client")
class Gamma(object):
    @testcase
    def test_a(self, env, result):
        pass

    @testcase(tags={"color": ("yellow", "red")})
    def test_b(self, env, result):
        pass

    # 100 generated testcases per suite instance.
    @testcase(parameters=list(range(100)))
    def test_c(self, env, result, val):
        pass


# To implement a custom lister, we need to inherit from `listing.BaseLister`
# override `get_output` method and return a string representation of
# the current test instance (e.g. multitest) and possibly its test
#  context, like suites & testcases


class ExampleLister(BaseLister):
    """
    Displays test instances and their suites (if available)
    along with number of testcases per suite in a single line.

    e.g.

        Multitest A: Foo (3 testcases), Bar (2 testcases)
        Multitest B: Baz (3 testcases)
    """

    def get_output(self, instance):
        # NOTE(review): when `instance` is a MultiTest with a falsy
        # test_context this method falls through and implicitly returns
        # None -- confirm BaseLister tolerates that.
        if isinstance(instance, MultiTest):
            # test_context pairs each suite object with its testcases.
            test_context = instance.test_context
            if test_context:
                suite_data = ", ".join(
                    [
                        "{suite_name} ({num_testcases} testcases)".format(
                            suite_name=suite.__class__.__name__,
                            num_testcases=len(testcases),
                        )
                        for suite, testcases in test_context
                    ]
                )
                return "{instance_name}: {suite_data}".format(
                    instance_name=instance.name, suite_data=suite_data
                )
        else:
            # Coming soon in future releases
            raise NotImplementedError


# Running this plan will print out the test information using the
# custom test lister we defined above.
@test_plan(name="Custom test lister example", test_lister=ExampleLister())
def main(plan):
    """Add two multitests; the custom lister prints their suite summaries."""
    primary = MultiTest(name="Primary", suites=[Alpha(), Beta()])
    secondary = MultiTest(name="Secondary", suites=[Gamma()])
    plan.add(primary)
    plan.add(secondary)


if __name__ == "__main__":
    sys.exit(not main())

Custom with commandline

Required files:

test_plan_commandline.py

#!/usr/bin/env python
"""
    This example shows how to implement a custom lister for
    displaying test context of a test plan.
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.testing.listing import BaseLister, listing_registry


@testsuite
class Alpha(object):
    # Untagged testcase.
    @testcase
    def test_a(self, env, result):
        pass

    # Simple tag, shortcut notation for {'simple': 'server'}.
    @testcase(tags="server")
    def test_b(self, env, result):
        pass

    # Named tag.
    @testcase(tags={"color": "blue"})
    def test_c(self, env, result):
        pass


# Suite tagged with `server`; testcases carry their own tags as well.
@testsuite(tags="server")
class Beta(object):
    @testcase(tags="client")
    def test_a(self, env, result):
        pass

    @testcase(tags={"color": "red"})
    def test_b(self, env, result):
        pass

    # A named tag may hold multiple values.
    @testcase(tags={"color": ("blue", "yellow")})
    def test_c(self, env, result):
        pass


@testsuite(tags="client")
class Gamma(object):
    @testcase
    def test_a(self, env, result):
        pass

    @testcase(tags={"color": ("yellow", "red")})
    def test_b(self, env, result):
        pass

    # 100 generated testcases per suite instance.
    @testcase(parameters=list(range(100)))
    def test_c(self, env, result, val):
        pass


# To implement a custom lister, we need to inherit from `listing.BaseLister`
# override `get_output` method and return a string representation of
# the current test instance (e.g. multitest) and possibly its test
#  context, like suites & testcases.
#
# To use in the commandline add NAME and DESCRIPTION and register with
# listing_registry


class HelloWorldLister(BaseLister):
    """
    Displays 'Hello World' for each MultiTest

    e.g.

        Hello World: Primary
        Hello World: Secondary
    """

    # NAME and DESCRIPTION are required to register the lister for
    # command-line use (see `--info hello-world` below).
    NAME = "HELLO_WORLD"
    DESCRIPTION = "This lister print Hello World for each multitest"

    def get_output(self, instance):
        # One output line per top-level test instance.
        return "Hello World: {}".format(instance.name)


# Registering the lister instance makes it available via the command line.
listing_registry.add_lister(HelloWorldLister())


# use --info hello-world to see the action
#
# it is also there in the --help text
#
#   --info TEST_INFO      (default: None)
#                         "pattern" - List tests in `--patterns` / `--tags` compatible format.
#                                 Max 25 testcases per suite will be displayed
#                         "name" - List tests in readable format.
#                                 Max 25 testcases per suite will be displayed
#                         "pattern-full" - List tests in `--patterns` / `--tags` compatible format.
#                         "name-full" - List tests in readable format.
#                         "count" - Lists top level instances and total number of suites & testcases per instance.
#                         "hello-world" - This lister print Hello World for each multitest
@test_plan(name="Custom test lister example")
def main(plan):
    test1 = MultiTest(name="Primary", suites=[Alpha(), Beta()])
    test2 = MultiTest(name="Secondary", suites=[Gamma()])
    plan.add(test1)
    plan.add(test2)


if __name__ == "__main__":
    sys.exit(not main())

Ordering

Basic Ordering & Shuffling

Required files:

test_plan_command_line.py

#!/usr/bin/env python
"""
This example shows how the run order for your suites / testcases
can be configured via command line options.
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style


@testsuite
class Alpha(object):
    # Testcases are deliberately declared in non-alphabetical order so the
    # effect of sorting/shuffling is visible.
    @testcase
    def test_b(self, env, result):
        pass

    @testcase
    def test_a(self, env, result):
        pass


@testsuite
class Beta(object):
    # Testcases are deliberately declared in reverse-alphabetical order.
    @testcase
    def test_c(self, env, result):
        pass

    @testcase
    def test_b(self, env, result):
        pass

    @testcase
    def test_a(self, env, result):
        pass


@testsuite
class Gamma(object):
    # Testcases are deliberately declared in reverse-alphabetical order.
    @testcase
    def test_c(self, env, result):
        pass

    @testcase
    def test_b(self, env, result):
        pass

    @testcase
    def test_a(self, env, result):
        pass


# You can try running the current script with the sample arguments below
# to see how the tests can be shuffled / sorted via command line arguments.

# Just shuffle the testcases, keep original ordering of suites.
# command line: `--shuffle testcases`

# Shuffle the suites only, using seed value of 15
# command line: `--shuffle suites --shuffle-seed 15`

# Shuffle suites and testcases (suites, testcases)
# command line: `--shuffle suites testcases`


@test_plan(
    name="Test Ordering / Shuffling basics (Command line)",
    # Using testcase level stdout so we can see sorted testcases
    stdout_style=Style("testcase", "testcase"),
)
def main(plan):

    # Run order is controlled externally via --shuffle / --shuffle-seed
    # (see the sample command lines above).
    multi_test_1 = MultiTest(name="Primary", suites=[Alpha(), Beta()])
    multi_test_2 = MultiTest(name="Secondary", suites=[Gamma()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    sys.exit(not main())

test_plan_programmatic.py

#!/usr/bin/env python
"""
This example shows how the run order for your tests / suites / testcases
can be configured programmatically.
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style
from testplan.testing.ordering import (
    NoopSorter,
    ShuffleSorter,
    AlphanumericSorter,
    SortType,
)


@testsuite
class Alpha(object):
    # Testcases are deliberately declared in non-alphabetical order so the
    # effect of the sorters below is visible.
    @testcase
    def test_b(self, env, result):
        pass

    @testcase
    def test_a(self, env, result):
        pass


@testsuite
class Beta(object):
    # Testcases are deliberately declared in reverse-alphabetical order.
    @testcase
    def test_c(self, env, result):
        pass

    @testcase
    def test_b(self, env, result):
        pass

    @testcase
    def test_a(self, env, result):
        pass


@testsuite
class Gamma(object):
    # Testcases are deliberately declared in reverse-alphabetical order.
    @testcase
    def test_c(self, env, result):
        pass

    @testcase
    def test_b(self, env, result):
        pass

    @testcase
    def test_a(self, env, result):
        pass


# This is the sorter that's used by default:
# Test cases are run in their original declaration order.
# Test suites are run in the order they are added to a multitest.
# Multitests (instances) are run in the order they are added to the plan.

noop_sorter = NoopSorter()


# You can shuffle your test runs by using the built-in ShuffleSorter.
# This is advised as a good practice in case you are running testcases in
# parallel and they have race conditions.

# Just shuffle the testcases, keep original ordering of suites.
# The plain-string and SortType enum forms below are interchangeable.
testcase_shuffler_a = ShuffleSorter("testcases")
testcase_shuffler_b = ShuffleSorter(SortType.TEST_CASES)


# Shuffle the suites only, using seed value of 15
suite_shuffler_a = ShuffleSorter(shuffle_type="suites", seed=15)
suite_shuffler_b = ShuffleSorter(shuffle_type=SortType.SUITES, seed=15)


# Shuffle suites & testcases
suite_testcase_shuffler_a = ShuffleSorter(("suites", "testcases"))
suite_testcase_shuffler_b = ShuffleSorter(
    shuffle_type=(SortType.SUITES, SortType.TEST_CASES)
)


# There is another built-in sorter that sorts the tests alphabetically:
testcase_alphanumeric_sorter_a = AlphanumericSorter("testcases")
suite_alphanumeric_sorter = AlphanumericSorter("suites")
suite_testcase_alphanumeric_sorter = AlphanumericSorter(
    ("suites", "testcases")
)


# Replace the `test_sorter` argument with the
# sorters / shufflers declared above to see how they work.


@test_plan(
    name="Test Ordering / Shuffling basics (Programmatic)",
    # Swap in any of the sorters/shufflers declared above.
    test_sorter=noop_sorter,
    # Using testcase level stdout so we can see sorted testcases
    stdout_style=Style("testcase", "testcase"),
)
def main(plan):

    multi_test_1 = MultiTest(name="Primary", suites=[Alpha(), Beta()])
    multi_test_2 = MultiTest(name="Secondary", suites=[Gamma()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    sys.exit(not main())

Custom Sorters

Required files:

test_plan.py

#!/usr/bin/env python
"""
This example shows how to implement a custom sorter class.
"""
import operator
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style
from testplan.testing.ordering import NoopSorter, TypedSorter


@testsuite
class Alpha(object):
    # Testcase names of increasing length, so the length-based custom
    # sorter's effect is visible.
    @testcase
    def test_a(self, env, result):
        pass

    @testcase
    def test_ab(self, env, result):
        pass


@testsuite
class Beta(object):
    # Testcase names of increasing length, so the length-based custom
    # sorter's effect is visible.
    @testcase
    def test_a(self, env, result):
        pass

    @testcase
    def test_ab(self, env, result):
        pass

    @testcase
    def test_abc(self, env, result):
        pass


@testsuite
class Epsilon(object):
    # Testcase names of increasing length, so the length-based custom
    # sorter's effect is visible.
    @testcase
    def test_a(self, env, result):
        pass

    @testcase
    def test_ab(self, env, result):
        pass

    @testcase
    def test_abc(self, env, result):
        pass


# We inherit from TypedSorter so we can apply
# optional sorting per group (testcases, testsuites etc)
class ReverseNameLengthSorter(TypedSorter):
    """
    This sorter orders tests from the longest name to the shortest.
    """

    def reverse_sort_by_name(self, items, name_getter):
        # Negating the key length gives descending order; Python's stable
        # sort keeps the original relative order of equal-length names,
        # exactly as sorted(..., reverse=True) would.
        return sorted(items, key=lambda entry: -len(name_getter(entry)))

    # One override per sort level:
    # Multitests -> sort_instances
    # Test Suites -> sort_testsuites
    # Test cases -> sort_testcases
    def sort_instances(self, instances):
        name_of = operator.attrgetter("name")
        return self.reverse_sort_by_name(instances, name_of)

    def sort_testsuites(self, testsuites):
        name_of = operator.attrgetter("name")
        return self.reverse_sort_by_name(testsuites, name_of)

    def sort_testcases(self, testcases):
        name_of = operator.attrgetter("name")
        return self.reverse_sort_by_name(testcases, name_of)


noop_sorter = NoopSorter()

# Reorder only the testcases, longest name first.
custom_sorter_1 = ReverseNameLengthSorter(sort_type="testcases")

# Reorder both suites and testcases.
custom_sorter_2 = ReverseNameLengthSorter(sort_type=("suites", "testcases"))


# Replace the `test_sorter` argument with the
# custom sorters declared above to see how they work.
@test_plan(
    name="Custom Sorter Example",
    # Swap in custom_sorter_1 / custom_sorter_2 declared above.
    test_sorter=noop_sorter,
    # Using testcase level stdout so we can see sorted testcases
    stdout_style=Style("testcase", "testcase"),
)
def main(plan):

    multi_test_1 = MultiTest(
        name="Primary", suites=[Alpha(), Beta(), Epsilon()]
    )

    plan.add(multi_test_1)


if __name__ == "__main__":
    sys.exit(not main())

Multi-level Ordering

Required files:

test_plan.py

#!/usr/bin/env python
"""
This example shows how different sorting logic can be applied
on different testing levels (e.g. plan, multitest)
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style
from testplan.testing.ordering import ShuffleSorter, AlphanumericSorter


@testsuite
class Alpha(object):
    # Declared in non-alphabetical order on purpose, so sorting is visible.
    @testcase
    def test_b(self, env, result):
        pass

    @testcase
    def test_a(self, env, result):
        pass


@testsuite
class Beta(object):
    # Declared in reverse-alphabetical order on purpose.
    @testcase
    def test_c(self, env, result):
        pass

    @testcase
    def test_b(self, env, result):
        pass

    @testcase
    def test_a(self, env, result):
        pass


@testsuite
class Zeta(object):
    # Declared in reverse-alphabetical order on purpose.
    @testcase
    def test_c(self, env, result):
        pass

    @testcase
    def test_b(self, env, result):
        pass

    @testcase
    def test_a(self, env, result):
        pass


@testsuite
class Gamma(object):
    # Declared in reverse-alphabetical order on purpose.
    @testcase
    def test_c(self, env, result):
        pass

    @testcase
    def test_b(self, env, result):
        pass

    @testcase
    def test_a(self, env, result):
        pass


# We have a plan level test sorter that will sort the tests alphabetically
# However on Multitest('Primary') we have an explicit `test_sorter` argument
# which will take precedence and shuffle the tests instead.
@test_plan(
    name="Multi-level Test ordering",
    test_sorter=AlphanumericSorter("all"),
    # Using testcase level stdout so we can see sorted testcases
    stdout_style=Style("testcase", "testcase"),
)
def main(plan):

    # The MultiTest-level test_sorter takes precedence over the plan-level
    # one, so "Primary" is shuffled while "Secondary" is sorted.
    multi_test_1 = MultiTest(
        name="Primary",
        test_sorter=ShuffleSorter("all"),
        suites=[Alpha(), Beta()],
    )

    multi_test_2 = MultiTest(name="Secondary", suites=[Zeta(), Gamma()])

    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    sys.exit(not main())

Parametrization

Required files:

test_plan.py

#!/usr/bin/env python
"""
This example shows how to use the parametrization
feature of `@testcase` decorator.
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style


@testsuite
class SimpleTest(object):

    # This will generate 4 new testcase methods, using a tuple for each one.
    @testcase(
        parameters=((5, 5, 10), (3, 2, 5), (0, 0, 0), ("foo", "bar", "foobar"))
    )
    def addition(self, env, result, a, b, expected):
        result.equal(a + b, expected)
        # Parametrization context for the generated testcases will be:
        # result.equal(5 + 5, 10)
        # result.equal(3 + 2, 5)
        # result.equal(0 + 0, 0)
        # result.equal('foo' + 'bar', 'foobar')

    # Combinatorial parametrization example
    # Commutativity check of the addition operation, (a + b = b + a).
    # (The method name says "associativity", which is a misnomer kept to
    # avoid changing testcase names in reports.)
    # This will generate 25 (5 x 5) methods.
    @testcase(
        parameters={
            "a": [1, 10, -5, -3.2, 3e12],
            "b": [0, 42, 4.2, -0.231, 5.5e5],
        }
    )
    def addition_associativity(self, env, result, a, b):
        # It's a good practice to generate a description
        # with the parametrized arguments as well.
        # So that you can have more context when you inspect the test report.
        result.equal(
            actual=a + b,
            expected=b + a,
            description="{a} + {b} == {b} + {a}".format(a=a, b=b),
        )

        # Generated testcases will have the following contexts:
        # result.equal(1 + 0, 0 + 1, ...)
        # result.equal(10 + 0, 0 + 10, ...)
        # result.equal(-5 + 0, 0 + -5, ...)
        # ...
        # ...
        # result.equal(3e12 + -.231, 3e12 + -.231, ...)
        # result.equal(3e12 + 5.5e5, 3e12 + 5.5e5, ...)

    # Shortcut notation that uses single values
    # for single argument parametrization
    # Assigns 2, 4, 6, 8 to `value` for each generated test case
    # Verbose notation would be
    # `parameters=((2,), (4,), (6,), (8,))` which is not that readable.
    @testcase(
        parameters=(
            2,  # first testcase
            4,  # second testcase
            6,  # third testcase
            8,  # fourth testcase
        )
    )
    def is_even(self, env, result, value):
        result.equal(value % 2, 0)


# The example below makes use of a custom name
# generation function for parametrization.

# This way we can come up with more readable testcase
# method names on the test reports.

# If we didn't use a custom name function, we'd end up with method name
# like `func_raises_error <func=.., error=...>`, but instead, the custom
# function will give us names like `func_raises_error__ValueError`.


def custom_error_name_func(func_name, kwargs):
    """Disregard `func` argument, use the error only."""
    error_type = kwargs["error"].__name__
    return f"{func_name}__{error_type}"


@testsuite
class ErrorTest(object):

    # The lambda functions in the parameters below try to
    # execute invalid Python code that raises certain errors.
    # The parametrized test method checks if the function
    # raises the expected error when it is run.
    # This will generate 5 methods, for each item in the tuple.
    @testcase(
        parameters=(
            # tuple notation, using default error value (TypeError)
            (lambda: "foo" + 5,),
            (lambda: object().b, AttributeError),
            (lambda: {"a": 5}["b"], KeyError),
            (lambda: int("a"), ValueError),
            (lambda: 10 / 0, ZeroDivisionError),
        ),
        # comment out the line below line to see how
        # Testplan falls back to simple method names with integer suffixes
        name_func=custom_error_name_func,
    )
    def func_raises_error(self, env, result, func, error=TypeError):
        # result.raises asserts that calling `func` raises `error`.
        with result.raises(error):
            func()


# This function returns the value of the product directly
# which will be interpreted as a simple tag.
def simple_tag_func(kwargs):
    """Return the title-cased product name, interpreted as a simple tag."""
    return kwargs["product"].title()


# This function returns a dictionary that is interpreted as a named tag.
def named_tag_func(kwargs):
    """Return a dict interpreted as a named tag: product -> title-cased name."""
    return {"product": kwargs["product"].title()}


@testsuite
class ProductTest(object):
    """Sample testsuite that demonstrates how `tag_func` works."""

    # tag_func generates extra tags per parametrized testcase, on top of
    # the static `tags` argument.
    @testcase(
        tags={"category": "CategoryA"},
        parameters=((2, 3, "productA"), (3, 4, "productB")),
        tag_func=simple_tag_func,
    )
    def simple_tag_func_test(self, env, result, a, b, product):
        result.true(True)

    @testcase(
        tags={"category": "CategoryB"},
        parameters=((2, 3, "productA"), (3, 4, "productB")),
        tag_func=named_tag_func,
    )
    def named_tag_func_test(self, env, result, a, b, product):
        result.true(True)


# Keep the original docstring and append the stringified kwargs on a new line
def kwargs_to_string(docstring, kwargs):
    """Keep the original docstring and append the stringified kwargs below it.

    Returns the docstring plus a new line containing ``str(kwargs)``.
    """
    return "\n".join([docstring, str(kwargs)])


# Use the original docstring, formatting
# it using kwargs via string interpolation.

# e.g. `foo: {foo}, bar: {bar}`.format(foo=2, bar=5)` -> 'foo: 2, bar: 5'
def interpolate_docstring(docstring, kwargs):
    """Format the original docstring via ``str.format`` with the kwargs.

    e.g. ``"foo: {foo}".format(foo=2)`` -> ``"foo: 2"``.
    """
    return docstring.format(**kwargs)


@testsuite
class DocStringTest(object):
    # docstring_func rewrites each generated testcase's docstring from the
    # original docstring plus the parametrization kwargs.
    @testcase(
        parameters=((2, 3, 5), (5, 10, 15)), docstring_func=kwargs_to_string
    )
    def addition_one(self, env, result, first, second, expected):
        """Test addition of two numbers."""
        return result.equal(first + second, expected)

    @testcase(
        parameters=((2, 3, 5), (5, 10, 15)),
        docstring_func=interpolate_docstring,
    )
    def addition_two(self, env, result, first, second, expected):
        """
        Testing addition with: {first} + {second}
        Expected value: {expected}
        """
        return result.equal(first + second, expected)


@test_plan(
    name="Parametrization Example",
    # Using detailed assertions so we can
    # see testcase context for generated testcases
    stdout_style=Style("assertion-detail", "assertion-detail"),
)
def main(plan):
    """Add the example suites to the plan as a single MultiTest."""
    plan.add(
        MultiTest(
            name="Primary",
            suites=[SimpleTest(), ErrorTest(), ProductTest(), DocStringTest()],
        )
    )


if __name__ == "__main__":
    # ``main()`` is truthy on success; invert it so the process exit
    # code is 0 on success and 1 on failure.
    sys.exit(not main())

PDF report

Summary report with no assertion details.

../_images/parametrization_example.png

Tagging and Filtering

Basic Filters

Required files:

test_plan_command_line.py

#!/usr/bin/env python
"""
This example shows:

* How the tests, test cases and test suites can be tagged.

* How tests / suites/ testcases can be filtered by
  patterns and tags via command line options.
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style


# A suite with no tags, will be filtered out if we apply any tag based filters
@testsuite
class Alpha(object):
    """Untagged suite: selected only by pattern-based (or no) filters."""

    @testcase
    def test_1(self, env, result):
        pass

    @testcase
    def test_2(self, env, result):
        pass


# A suite with testcase level tags only.
@testsuite
class Beta(object):
    """Suite demonstrating simple and named tags at testcase level."""

    # A testcase tagged with a simple tag: `server`
    # This is a shortcut notation for {'simple': 'server'}
    @testcase(tags="server")
    def test_1(self, env, result):
        pass

    # A testcase tagged with a named (`color`) tag: `blue`
    @testcase(tags={"color": "blue"})
    def test_2(self, env, result):
        pass

    # A testcase tagged with both simple and named tag
    @testcase(tags={"simple": "server", "color": "blue"})
    def test_3(self, env, result):
        pass


# A suite with class level tags, these class level tags
#  will be propagated to each test case as well.
@testsuite(tags=("server", "client"))
class Gamma(object):
    """Suite with class-level tags propagated to all testcases."""

    @testcase(tags={"color": "red"})
    def test_1(self, env, result):
        pass

    # A named tag may carry multiple values.
    @testcase(tags={"color": ("blue", "red")})
    def test_2(self, env, result):
        pass

    @testcase(tags={"color": "yellow"})
    def test_3(self, env, result):
        pass


# You can run the current Testplan script with the arguments below to see
# how command line filtering works.


# Run all Multitests named `Primary` and all of its suites & testcases.
# command line: `--patterns Primary`


# Run `Alpha` suite (and all testcases) from `Primary` multitest.
# command line: `--patterns Primary:Alpha`


# Run `Alpha.test_1` from `Primary` multitest.
# command line: `--patterns Primary:Alpha:test_1`


# Run all testcases named `test_1` from all suites & multitests.
# command line: `--patterns '*:*:test_1'`


# Multi-pattern filtering, runs multitests with names `Primary` and `Secondary`
# command line: `--patterns Primary Secondary`
# command line (alternative) : --patterns Primary --patterns Secondary


# Run all multitests that end with `ary` (Primary & Secondary)
# command line: --patterns *ary


# Tag based filtering, runs all testcases that are tagged with `server`.
# Suite level tags propagate to testcases as well.
# command line: `--tags server`


# Run all testcases with the named tag: `color = blue`
# command line: `--tags color=blue`


# Multi tag filtering, run all testcases tagged with `server` OR `client`.
# command line: `--tags server client`
# command line (alt.): `--tags server --tags client`


# Multi tag filtering, run all testcases tagged with
#  `server` OR `color = red` OR `color = blue`
# command line: `--tags server color=red,blue`
# command line (alt.): `--tags server --tags color=red,blue`
# command line (alt. 2): `--tags server --tags color=red --tags color=blue`


# Multi tag filtering, run all testcases tagged with `server` AND `client`.
# command line: `--tags-all server client`


@test_plan(
    name="Tagging & Filtering (Command line)",
    # Using testcase level stdout so we can see filtered testcases
    stdout_style=Style("testcase", "testcase"),
)
def main(plan):
    """Add two MultiTests; `Primary` also carries a multitest-level tag."""

    multi_test_1 = MultiTest(
        name="Primary", suites=[Alpha(), Beta()], tags={"color": "white"}
    )

    multi_test_2 = MultiTest(name="Secondary", suites=[Gamma()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    # Exit code 0 on success (main() truthy), 1 otherwise.
    sys.exit(not main())

test_plan_programmatic.py

#!/usr/bin/env python
"""
This example shows:

* How test instances (e.g. multitest), test cases and test suites can be tagged.

* How tests / suites/ testcases can be filtered
  by patterns and tags programmatically.

"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style
from testplan.testing.filtering import Filter, Pattern, Tags, TagsAll


# A suite with no tags, can still inherit tag data
# if it is added to a multitest with tags.
@testsuite
class Alpha(object):
    """Untagged suite; inherits tags from its parent MultiTest."""

    @testcase
    def test_1(self, env, result):
        pass

    @testcase
    def test_2(self, env, result):
        pass


# A suite with testcase level tags only.
@testsuite
class Beta(object):
    """Suite demonstrating simple and named tags at testcase level."""

    # A testcase tagged with a simple tag: `server`
    # This is a shortcut notation for {'simple': 'server'}
    @testcase(tags="server")
    def test_1(self, env, result):
        pass

    # A testcase tagged with a named (`color`) tag: `blue`
    @testcase(tags={"color": "blue"})
    def test_2(self, env, result):
        pass

    # A testcase tagged with both simple and named tag
    @testcase(tags={"simple": "server", "color": "blue"})
    def test_3(self, env, result):
        pass


# A suite with class level tags, these class level tags
#  will be propagated to each test case as well.
@testsuite(tags=("server", "client"))
class Gamma(object):
    """Suite with class-level tags propagated to all testcases."""

    @testcase(tags={"color": "red"})
    def test_1(self, env, result):
        pass

    # A named tag may carry multiple values.
    @testcase(tags={"color": ("blue", "red")})
    def test_2(self, env, result):
        pass

    @testcase(tags={"color": "yellow"})
    def test_3(self, env, result):
        pass


# Default (noop) filter, runs all tests
default_filter = Filter()

# Run the Multitest named `Primary` and all of its suites & testcases.
pattern_filter_1 = Pattern("Primary")

# Run `Alpha` suite (and all testcases) from `Primary` multitest.
pattern_filter_2 = Pattern("Primary:Alpha")

# Run `Alpha.test_1` from `Primary` multitest.
pattern_filter_3 = Pattern("Primary:Alpha:test_1")

# Run all testcases named `test_1` from all suites & multitests.
pattern_filter_4 = Pattern("*:*:test_1")

# Multi-pattern filtering, runs multitests with names `Primary` and `Secondary`
pattern_filter_5 = Pattern.any("Primary", "Secondary")

# Run all multitests that end with `ary` (Primary & Secondary)
pattern_filter_6 = Pattern("*ary")

# Tag based filtering, runs all testcases that are tagged with `server`.
# Suite level tags propagate to testcases as well.
tag_filter_1 = Tags("server")

# Run all testcases with the named tag: `color = blue`
tag_filter_2 = Tags({"color": "blue"})

# Multi tag filtering, run all testcases tagged with `server` OR `client`.
tag_filter_3 = Tags(("server", "client"))

# Multi tag filtering, run all testcases tagged with
#  `server` OR `color = red` OR `color = blue`
tag_filter_4 = Tags({"simple": "server", "color": ("red", "blue")})

# Multi tag filtering, run all testcases tagged with `server` AND `client`.
tag_filter_5 = TagsAll(("server", "client"))

# Run all tests that are tagged with `color` = `white`.
# None of the suite classes and their testcases have such tag,
# however in the plan declaration below we use a multitest level tag
# for `multi_test_1`, which propagates `color` = `white` to the instances of
# Alpha and Beta suites (and to their testcases). This only affects the
# instances of the suites and the original classes' tag indices
# remain unchanged.
tag_filter_6 = Tags({"color": "white"})

# Replace the `test_filter` argument with the
# filters declared above to see how they work.
@test_plan(
    name="Tagging & Filtering (Programmatic)",
    test_filter=default_filter,
    # Using testcase level stdout so we can see filtered testcases
    stdout_style=Style("testcase", "testcase"),
)
def main(plan):
    """Add two MultiTests; `Primary` carries a multitest-level tag."""

    multi_test_1 = MultiTest(
        name="Primary", suites=[Alpha(), Beta()], tags={"color": "white"}
    )
    multi_test_2 = MultiTest(name="Secondary", suites=[Gamma()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    # Exit code 0 on success (main() truthy), 1 otherwise.
    sys.exit(not main())

Composite Filters

Required files:

test_plan_command_line.py

#!/usr/bin/env python
"""
This example shows how test filters can be composed via command line arguments.
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style


@testsuite
class Alpha(object):
    """Untagged suite used by the composite-filter examples."""

    @testcase
    def test_1(self, env, result):
        pass

    @testcase
    def test_2(self, env, result):
        pass


@testsuite
class Beta(object):
    """Suite with testcase-level simple and named tags."""

    @testcase(tags="server")
    def test_1(self, env, result):
        pass

    @testcase(tags={"color": "blue"})
    def test_2(self, env, result):
        pass

    @testcase(tags={"simple": "server", "color": "blue"})
    def test_3(self, env, result):
        pass


# Class-level tags propagate to every testcase of the suite.
@testsuite(tags=("server", "client"))
class Gamma(object):
    """Suite with class-level tags propagated to all testcases."""

    @testcase(tags={"color": "red"})
    def test_1(self, env, result):
        pass

    @testcase(tags={"color": ("blue", "green")})
    def test_2(self, env, result):
        pass

    @testcase(tags={"color": "yellow"})
    def test_3(self, env, result):
        pass


@testsuite
class Delta(object):
    """Untagged suite used by the composite-filter examples."""

    @testcase
    def test_1(self, env, result):
        pass

    @testcase
    def test_2(self, env, result):
        pass


# Composite filtering via command line arguments currently support
# tag and pattern based filtering with some limitations:

# OR composition between different filtering categories (e.g. Tag & Pattern)
# is not supported on command line filtering.
# This means when `--tags` and `--patterns` are used together, only
# the tests that match BOTH filters will be run.

# AND composition between same filtering categories
# (e.g. Tag + Tag, Pattern + Pattern) is not supported on
# command line filtering.

# This means when `--tags server` and `--tags client` are used together,
# tests that match ANY of these rules will be run.

# `Not` meta filter is not supported via command line options, you need
# to rely on programmatic filtering to make use of this feature.


# You can run the current Testplan script with the sample command line
# arguments below to see how command line filtering works:


# Run tests tagged with `color = red` OR `color = yellow`
# OR tagged with `server` AND `color = blue`
# command line: `--tags color=red,yellow --tags-all server color=blue`


# Run tests that have the name `test_2` and are tagged with `color = blue`
# command line: `--patterns *:*:test_2 --tags color=blue`


# Run all tests: tagged with `server`
# AND (belong to `Gamma` multitest OR has the name `test_3`)
# command line: `--tags server --patterns Gamma *:*:test_3`
# command line (alt.): `--tags server --patterns Gamma --patterns *:*:test_3`


@test_plan(
    name="Composite Filters (Command line)",
    # Using testcase level stdout so we can see filtered testcases
    stdout_style=Style("testcase", "testcase"),
)
def main(plan):
    """Add three MultiTests to exercise the composite filter examples."""

    multi_test_1 = MultiTest(name="Primary", suites=[Alpha(), Beta()])
    multi_test_2 = MultiTest(name="Secondary", suites=[Gamma()])
    multi_test_3 = MultiTest(name="Other", suites=[Delta()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)
    plan.add(multi_test_3)


if __name__ == "__main__":
    # Exit code 0 on success (main() truthy), 1 otherwise.
    sys.exit(not main())

test_plan_programmatic.py

#!/usr/bin/env python
"""
This example shows:

* How test filters can be composed by using bitwise
  operators or meta filters programmatically.

* How to build complex filtering logic with filter compositions.

"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style
from testplan.testing.filtering import Pattern, Tags, TagsAll, Not, And, Or


@testsuite
class Alpha(object):
    """Untagged suite used by the programmatic composite-filter examples."""

    @testcase
    def test_1(self, env, result):
        pass

    @testcase
    def test_2(self, env, result):
        pass


@testsuite
class Beta(object):
    """Suite with testcase-level simple and named tags."""

    @testcase(tags="server")
    def test_1(self, env, result):
        pass

    @testcase(tags={"color": "blue"})
    def test_2(self, env, result):
        pass

    @testcase(tags={"simple": "server", "color": "blue"})
    def test_3(self, env, result):
        pass


# Class-level tags propagate to every testcase of the suite.
@testsuite(tags=("server", "client"))
class Gamma(object):
    """Suite with class-level tags propagated to all testcases."""

    @testcase(tags={"color": "red"})
    def test_1(self, env, result):
        pass

    @testcase(tags={"color": ("blue", "green")})
    def test_2(self, env, result):
        pass

    @testcase(tags={"color": "yellow"})
    def test_3(self, env, result):
        pass


@testsuite
class Delta(object):
    """Untagged suite used by the programmatic composite-filter examples."""

    @testcase
    def test_1(self, env, result):
        pass

    @testcase
    def test_2(self, env, result):
        pass


# You can use meta filters or bitwise operators to create filter compositions:

# Bitwise OR operator (`|`) or `Or` meta filter creates a new
# filter that runs tests that pass for any of the composed filters.

# E.g. test_filter_a | test_filter_b == Or(test_filter_a, test_filter_b)

# Run tests tagged with `color = red` OR `color = yellow`
# OR tagged with `server` AND `color = blue`

composite_filter_1_a = Tags({"color": ("red", "yellow")}) | TagsAll(
    {"simple": "server", "color": "blue"}
)

composite_filter_1_b = Or(
    Tags({"color": ("red", "yellow")}),
    TagsAll({"simple": "server", "color": "blue"}),
)


# Run tests that belong to the multitest named `Primary` OR are tagged with
# `server`. Note that this kind of OR composition between different filter
# categories (Pattern, Tag etc) is not supported via command line options.

composite_filter_2_a = Pattern("Primary") | Tags("server")
composite_filter_2_b = Or(Pattern("Primary"), Tags("server"))


# Bitwise AND operator (`&`) or `And` meta filter creates a new filter that
# runs tests that pass all of the composed filters.

# Run tests that have the name `test_2` and are tagged with `color = blue`

composite_filter_3_a = Pattern("*:*:test_2") & Tags({"color": "blue"})
composite_filter_3_b = And(Pattern("*:*:test_2"), Tags({"color": "blue"}))


# Bitwise negation (`~`) or `Not` meta filter creates a new filter that
# runs tests that fail the original filter.

# Run tests that do not have the name `test_1`

composite_filter_4_a = ~Pattern("*:*:test_1")
composite_filter_4_b = Not(Pattern("*:*:test_1"))


# Meta filters can be composed as well, which allow us
# to create complex filtering rules:

# Run all tests: tagged with `server`
# AND (belong to `Gamma` multitest OR has the name `test_3`)

composite_filter_5_a = Tags("server") & (
    Pattern("Gamma") | Pattern("*:*:test_3")
)

composite_filter_5_b = And(
    Tags("server"), Or(Pattern("Gamma"), Pattern("*:*:test_3"))
)

# Run all testcases except the ones that are tagged
# with `color = blue` OR has the name `test_1`.

composite_filter_6_a = ~(Tags({"color": "blue"}) | Pattern("*:*:test_1"))
composite_filter_6_b = Not(Or(Tags({"color": "blue"}), Pattern("*:*:test_1")))


# Replace the `test_filter` argument with the
# filters declared above to see how they work.


@test_plan(
    name="Composite Filters (Programmatic)",
    test_filter=composite_filter_1_a,
    # Using testcase level stdout so we can see filtered testcases
    stdout_style=Style("testcase", "testcase"),
)
def main(plan):
    """Add three MultiTests to exercise the composite filter examples."""

    multi_test_1 = MultiTest(name="Primary", suites=[Alpha(), Beta()])
    multi_test_2 = MultiTest(name="Secondary", suites=[Gamma()])
    multi_test_3 = MultiTest(name="Other", suites=[Delta()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)
    plan.add(multi_test_3)


if __name__ == "__main__":
    # Exit code 0 on success (main() truthy), 1 otherwise.
    sys.exit(not main())

Custom Filters

Required files:

test_plan.py

#!/usr/bin/env python
"""
This example shows how you can implement custom filtering logic for your tests.
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style
from testplan.testing.filtering import Filter, Pattern


def check_priority(value):
    """Validate that ``value`` is a usable priority.

    :param value: candidate priority value.
    :raises ValueError: if ``value`` is not a positive integer.
    """
    # Use an explicit raise instead of ``assert`` so the validation is
    # not silently stripped when Python runs with the ``-O`` flag.
    if not (isinstance(value, int) and value > 0):
        raise ValueError("Priority must be positive integer.")


def priority(value):
    """Decorator factory that attaches a ``priority`` attribute to an
    unbound testcase method."""
    check_priority(value)

    def decorator(func):
        # Stash the priority on the function object so that custom
        # filters can read it later via the testcase's attribute.
        func.priority = value
        return func

    return decorator


class BaseSuite(object):
    """Base suite class for suite level custom filtering demonstration."""

    # Intentionally empty: only used as a marker base class
    # for the SubclassFilter example below.
    pass


@testsuite
class Alpha(BaseSuite):
    """Suite (inherits BaseSuite) with priorities on every testcase."""

    @priority(1)
    @testcase
    def test_1(self, env, result):
        pass

    @priority(5)
    @testcase
    def test_2(self, env, result):
        pass

    @priority(4)
    @testcase
    def test_3(self, env, result):
        pass


@testsuite
class Beta(BaseSuite):
    """Suite (inherits BaseSuite); ``test_3`` has no priority attribute."""

    @priority(1)
    @testcase
    def test_1(self, env, result):
        pass

    @priority(3)
    @testcase
    def test_2(self, env, result):
        pass

    # No @priority here: PriorityFilter will always exclude this case.
    @testcase
    def test_3(self, env, result):
        pass


@testsuite
class Gamma(object):
    """Suite that does NOT inherit BaseSuite; ``test_1`` has no priority."""

    # No @priority here: PriorityFilter will always exclude this case.
    @testcase
    def test_1(self, env, result):
        pass

    @priority(2)
    @testcase
    def test_2(self, env, result):
        pass

    @priority(1)
    @testcase
    def test_3(self, env, result):
        pass


class PriorityFilter(Filter):
    """
    Custom filter: selects testcases whose ``priority`` attribute
    lies within the given (inclusive) interval.
    """

    def __init__(self, minimum, maximum=None):
        # Validate bounds eagerly so a bad interval fails fast.
        check_priority(minimum)
        if maximum is not None:
            check_priority(maximum)

        self.minimum = minimum
        self.maximum = maximum

    def filter_case(self, case):
        """Return True when the case priority falls inside the interval."""
        # Testcases without an explicit priority are always excluded.
        if not hasattr(case, "priority"):
            return False

        if self.maximum is None:
            return self.minimum <= case.priority
        return self.minimum <= case.priority <= self.maximum


class SubclassFilter(Filter):
    """
    Custom suite-level filter: selects suites that are instances of
    (i.e. inherit from) the given base class.
    """

    def __init__(self, base_kls):
        # Fail fast when the argument is not a class object.
        assert isinstance(base_kls, type), (
            "`base_kls` must be of type `type`, it was: {}".format(
                type(base_kls)
            )
        )

        self.base_kls = base_kls

    def filter_suite(self, suite):
        """Return True when the suite instance inherits from ``base_kls``."""
        return isinstance(suite, self.base_kls)


# Run test cases that have a minimum priority of 5
priority_filter_1 = PriorityFilter(minimum=5)

# Run test cases that have a priority between 1 and 3 (inclusive)
priority_filter_2 = PriorityFilter(minimum=1, maximum=3)

# Run test suites that inherit from BaseSuite class.
subclass_filter = SubclassFilter(BaseSuite)

# Custom filters can be composed (via bitwise operators) as well:

# Run test cases that:
# have a minimum priority of 5
# OR have a priority between 1 and 3 (inclusive)
composed_filter_1 = priority_filter_1 | priority_filter_2


# Run test cases that:
# Belong to a suite that inherits from BaseSuite
# AND (have a minimum priority of 5 OR have a priority between 1 and 3)
composed_filter_2 = subclass_filter & composed_filter_1


# We can also compose custom filters with the built-in filters as well:
# Run test cases that:
# Belong to suites that inherit from BaseSuite
# AND have the name `test_2`
composed_filter_3 = subclass_filter & Pattern("*:*:test_2")


# Replace the `test_filter` argument with the
# filters declared above to see how they work.


@test_plan(
    name="Custom Test Filters",
    test_filter=priority_filter_1,
    # Using testcase level stdout so we can see filtered testcases
    stdout_style=Style("testcase", "testcase"),
)
def main(plan):
    """Add a single MultiTest containing all three example suites."""

    multi_test = MultiTest(name="Sample", suites=[Alpha(), Beta(), Gamma()])

    plan.add(multi_test)


if __name__ == "__main__":
    # Exit code 0 on success (main() truthy), 1 otherwise.
    sys.exit(not main())

Multi level Filtering

Required files:

test_plan.py

#!/usr/bin/env python
"""
This example shows how you can apply different test filters on different levels
(e.g. plan, multitest level)
"""
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style
from testplan.testing.filtering import Pattern


@testsuite
class Alpha(object):
    """Suite used by the multi-level filtering example."""

    @testcase
    def test_1(self, env, result):
        pass

    @testcase
    def test_2(self, env, result):
        pass


@testsuite
class Beta(object):
    """Suite used by the multi-level filtering example."""

    @testcase
    def test_1(self, env, result):
        pass

    @testcase
    def test_2(self, env, result):
        pass

    @testcase
    def test_3(self, env, result):
        pass


@testsuite
class Gamma(object):
    """Suite used by the multi-level filtering example."""

    @testcase
    def test_1(self, env, result):
        pass

    @testcase
    def test_2(self, env, result):
        pass

    @testcase
    def test_3(self, env, result):
        pass


# In the example below, we have plan level test filter that will run
# test cases that have the name `test_3` only.
#
# However on Multitest('Primary') we also have another test filter that
# will run test cases with the name `test_1`. This filter will take precedence
# over the plan level filter.


@test_plan(
    name="Multi-level Filtering",
    test_filter=Pattern("*:*:test_3"),
    # Using testcase level stdout so we can see filtered testcases
    stdout_style=Style("testcase", "testcase"),
)
def main(plan):
    """Add two MultiTests; `Primary` overrides the plan-level filter."""

    multi_test_1 = MultiTest(
        name="Primary",
        suites=[Alpha(), Beta()],
        test_filter=Pattern("*:*:test_1"),
    )
    multi_test_2 = MultiTest(name="Secondary", suites=[Gamma()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == "__main__":
    # Exit code 0 on success (main() truthy), 1 otherwise.
    sys.exit(not main())

Parallel

Basic

Required files:

test_plan.py

#!/usr/bin/env python
"""
Example script to demonstrate parallel test execution of a MultiTest.
"""
import sys

from testplan import test_plan
from testplan.report.testing.styles import Style, StyleEnum

OUTPUT_STYLE = Style(StyleEnum.ASSERTION_DETAIL, StyleEnum.ASSERTION_DETAIL)


@test_plan(
    name="ParallelMultiTest",
    pdf_path="report.pdf",
    stdout_style=OUTPUT_STYLE,
    pdf_style=OUTPUT_STYLE,
)
def main(plan):
    """
    Testplan decorated main function. Adds a single parallel MultiTest to the
    test plan.

    :param plan: Plan to add MultiTest to.
    :return: Results of tests.
    """
    # Schedule the MultiTest via a Task target; the MultiTest itself is
    # constructed by ``make_multitest`` in the ``parallel_tasks`` module.
    plan.schedule(target="make_multitest", module="parallel_tasks")


if __name__ == "__main__":
    # Propagate the plan's exit code to the process.
    sys.exit(main().exit_code)

parallel_tasks.py

"""Example test suite to demonstrate grouped parallel MultiTest execution."""
import threading

from testplan.testing.multitest import MultiTest
from testplan.testing.multitest.suite import testsuite, testcase
from testplan.common.utils import thread as thread_utils

import resource_manager


@testsuite
class SampleTest(object):
    """
    Example test suite. The test cases are split into two different execution
    groups. Only tests from the same group will be executed in parallel with
    each other - the groups overall are executed serially. To demonstrate
    this, each test acquires one of two resources that cannot both be acquired
    in parallel.

    You will find that modifying a single test from the "first" group to acquire
    the "second" resource (or vice-versa) will cause the test to fail.

    NOTE: when running a parallel MultiTest, all testcases from a given
    execution group are run together, regardless of the order they are defined
    within the testsuite class. Each execution group is run separately from all
    others. This is in contrast to the default serial mode, where testcases
    are run serially in the order they are defined within the testsuite class.
    """

    def __init__(self):
        """Set up the synchronisation primitives shared by the testcases."""
        # A Barrier is a synchronisation primitive which allows a fixed number
        # of threads (in our case, 2) to wait for each other. We use it here
        # to demonstrate that testcases are run concurrently and how they may
        # be synchronised with each other.
        #
        # Note that on Python 3 you can use the Barrier class from the standard
        # library:
        # https://docs.python.org/3.7/library/threading.html#barrier-objects .
        # Here we use a backported Barrier provided by Testplan, which works
        # on both Python 2 and 3.
        self._barrier = thread_utils.Barrier(2)

        # The Event synchronisation primitive allows one thread to signal to
        # another that is waiting on the first thread to do some work. We use
        # it here to demonstrate another way testcases within the same
        # execution group may be synchronised with each other.
        self._test_g2_1_done = threading.Event()

    @testcase(execution_group="first")
    def test_g1_1(self, env, result):
        """
        Wait for test_g1_2 to also acquire the first resource. Assert that the
        refcount is 2.
        """
        self._test_g1_impl(env, result)

    @testcase(execution_group="second")
    def test_g2_1(self, env, result):
        """Assert that no other test holds the second resource."""
        with env.resources["second"] as res:
            result.true(res.active)
            result.equal(res.refcount, 1)
        # Signal test_g2_2 that it may now acquire the resource.
        self._test_g2_1_done.set()

    @testcase(execution_group="first")
    def test_g1_2(self, env, result):
        """
        Mirror image of test_g1_1. We wait for test_g1_1 to acquire the first
        resource while running in another thread, then assert that the refcount
        is 2.
        """
        self._test_g1_impl(env, result)

    @testcase(execution_group="second")
    def test_g2_2(self, env, result):
        """Wait for test_g2_1 to release the resource before acquiring it."""
        self._test_g2_1_done.wait()

        with env.resources["second"] as res:
            result.true(res.active)
            result.equal(res.refcount, 1)

    def _test_g1_impl(self, env, result):
        """
        Implementation of test_g1 testcases. Both testcases use the same logic
        but are run concurrently in separate threads.
        """
        with env.resources["first"] as res:
            result.true(res.active)

            # Wait for both threads to acquire the resource.
            self._barrier.wait()

            # Both threads have acquired the resource - check that the refcount
            # is 2.
            result.equal(res.refcount, 2)

            # Wait for both threads to check the refcount before releasing the
            # resource.
            self._barrier.wait()


def make_multitest():
    """
    Callable target to build a MultiTest. The `thread_pool_size` argument
    instructs Testplan to create a thread pool for running the MultiTest
    testcases.

    :return: a MultiTest whose testcases run in a pool of 2 worker threads.
    """
    # NOTE(review): "Parallezation" looks like a typo for "Parallelization",
    # but it is the report-facing test name, so it is kept unchanged here.
    return MultiTest(
        name="Testcase Parallezation",
        suites=[SampleTest()],
        thread_pool_size=2,
        environment=[
            resource_manager.ExclusiveResourceManager(name="resources")
        ],
    )

resource_manager.py

"""
Example of a custom driver, that manages several resources. Only a single
resource may be "acquired" at a time, however that same resource may be
acquired multiple times. The manager enforces this logic.
"""
import collections
import functools
import threading

from testplan.testing.multitest import driver


class ExclusiveResourceManager(driver.Driver):
    """
    Driver which manages several resources. Only one resource may be active at a
    time.

    This is only a contrived example to demonstrate the grouping of parallel
    tests execution - not a suggested pattern for managing resources.
    """

    # Names of the resources this manager creates on construction.
    RESOURCE_NAMES = ("first", "second")

    def __init__(self, **kwargs):
        """Create the named resources and the refcount bookkeeping."""
        # Mutex guards _refcounts since testcases run on multiple threads.
        self._refcounts_mutex = threading.Lock()
        self._refcounts = collections.Counter()
        self._resources = {}
        for name in self.RESOURCE_NAMES:
            self.add_resource(name)

        # Internal state is set up before the base Driver initialiser runs.
        super(ExclusiveResourceManager, self).__init__(**kwargs)

    def __getitem__(self, item):
        """Provide access to the resources."""
        return self._resources[item]

    def add_resource(self, name):
        """Add a named resource."""
        # The resource object delegates all bookkeeping back to this
        # manager via partially-applied callbacks.
        self._resources[name] = _AcquirableResource(
            acquire_callback=functools.partial(self._acquire, name),
            release_callback=functools.partial(self._release, name),
            refcount_callback=functools.partial(self._refcount_cbk, name),
        )

    def _acquire(self, resource_name):
        """
        Check that no other resources are in use. Increment the usage refcount.

        :raises RuntimeError: if any *other* resource currently has a
            non-zero refcount (the exclusivity rule).
        """
        with self._refcounts_mutex:
            if not all(
                count == 0
                for key, count in self._refcounts.items()
                if key != resource_name
            ):
                raise RuntimeError(
                    "Cannot acquire resource {} when other resources are in "
                    "use.".format(resource_name)
                )
            self._refcounts[resource_name] += 1

    def _release(self, resource_name):
        """Decrement the usage refcount."""
        with self._refcounts_mutex:
            assert self._refcounts[resource_name] > 0
            self._refcounts[resource_name] -= 1

    def _refcount_cbk(self, resource_name):
        """Return the current refcount for a given resource."""
        with self._refcounts_mutex:
            return self._refcounts[resource_name]


class _AcquirableResource(object):
    """A resource which may be acquired via a `with` context."""

    def __init__(self, acquire_callback, release_callback, refcount_callback):
        self._acquire_callback = acquire_callback
        self._release_callback = release_callback
        self._refcount_callback = refcount_callback

    def __enter__(self):
        """Report back that this resource has been acquired."""
        self._acquire_callback()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Report back that this resource has been released."""
        self._release_callback()

    @property
    def active(self):
        """:return: whether the resource has been acquired."""
        return self.refcount > 0

    @property
    def refcount(self):
        """:return: the number of active references to this resource."""
        return self._refcount_callback()

Logging

Basic

test_plan_logging.py

import logging

from testplan import test_plan
from testplan.report import Status
from testplan.report.testing.styles import Style, StyleEnum
from testplan.testing.base import ASSERTION_INDENT
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.testing.multitest.logging import (
    CaptureLevel,
    LogCaptureMixin,
    AutoLogCaptureMixin,
)


@testsuite
class LoggingSuite(LogCaptureMixin):
    """
    Demonstrate how logging can be added to a testcase and captured into the
    result from a test suite. Add ``LogCaptureMixin`` and ``self.logger`` will be
    available for logging; ``self.capture_log(result)`` can be used as a context
    manager to capture the log in the result. It is possible to format the log as
    needed, and also to attach the captured log as a file.

    The log can be captured at 3 levels:

    - TESTSUITE: only the logs logged through ``self.logger`` are captured
    - TESTPLAN: all testplan-related logs are captured (so driver logs are
      included as well)
    - ROOT: all logs are captured at the level the root logger is set to
      (normally WARNING)
    """

    @testcase
    def testsuite_level(self, env, result):
        """Default TESTSUITE level: only records from ``self.logger`` are kept."""
        with self.capture_log(
            result
        ) as logger:  # the captured logger is returned as a convenience, but it
            # is really the same object as self.logger
            logger.info("Hello")
            self.logger.info("Logged as well")
            self.logger.parent.info("Not captured")
            logging.getLogger().warning("Not captured either")

    @testcase
    def testplan_level(self, env, result):
        """TESTPLAN level: the parent (testplan) logger is captured as well."""
        with self.capture_log(
            result, capture_level=CaptureLevel.TESTPLAN
        ) as logger:
            logger.info("Hello")
            self.logger.info("Logged as well")
            self.logger.parent.info("Now captured")
            logging.getLogger().warning("Not captured either")

    @testcase
    def root_level(self, env, result):
        """ROOT level: even records reaching the root logger are captured."""
        with self.capture_log(
            result, capture_level=CaptureLevel.ROOT
        ) as logger:
            logger.info("Hello")
            self.logger.info("Logged as well")
            self.logger.parent.info("Now captured")
            logging.getLogger().warning("This captured too")

    @testcase
    def attach(self, env, result):
        """Attach the captured log to the report as a file instead of inline."""
        with self.capture_log(result, attach_log=True) as logger:
            logger.info("Attached Log")

    @testcase
    def format(self, env, result):
        """Capture with a custom ``logging``-style format string."""
        with self.capture_log(
            result,
            format="%(asctime)-24s %(name)-50s %(levelname)-15s %(message)s",
        ) as logger:
            logger.info("Formatted")

    @testcase
    def multiple(self, env, result):
        """Several capture blocks in one testcase produce separate result entries."""
        with self.capture_log(result):
            self.logger.info("CaptureGroup 1")
            self.logger.error(
                "To have some color"
            )  # This level goes to stdout too

        # do an assertion to separate the blocks
        result.true(True, "This is so true")

        with self.capture_log(result):
            self.logger.info("CaptureGroup 2")
            self.logger.warning(
                "To have some color"
            )  # This level goes to stdout too

    @testcase
    def specials(self, env, result):
        """Testplan-specific logger helpers also work inside a capture block."""
        with self.capture_log(result):
            self.logger.test_info("Test info log: goes to the console as well")
            self.logger.log_test_status(
                "A mandatory check", Status.PASSED, indent=ASSERTION_INDENT
            )


@testsuite
class AutoLoggingSuite(AutoLogCaptureMixin):
    """
    AutoLogCaptureMixin automatically appends the captured log at the end of
    every testcase, with no explicit ``capture_log`` block needed.
    """

    @testcase
    def case(self, env, result):
        """Log one line; the mixin adds it to the result automatically."""
        self.logger.info("Hello")

    @testcase
    def case2(self, env, result):
        """Capture happens for every testcase, not just the first one."""
        self.logger.info("Do it for all the testcases")


@testsuite
class AutoLoggingSuiteThatAttach(AutoLogCaptureMixin):
    """
    Auto-capture suite configured to attach the captured log as a file
    instead of embedding it inline in the report.
    """

    def __init__(self):
        super(AutoLoggingSuiteThatAttach, self).__init__()
        # Switch the mixin's capture config to attach-as-file mode.
        self.log_capture_config.attach_log = True

    @testcase
    def case(self, env, result):
        """The logged line ends up in the attached log file."""
        self.logger.info("Hello Attached")


@testsuite
class AutoLoggingSuiteThatFormat(AutoLogCaptureMixin):
    """
    Auto-capture suite configured with a custom ``logging``-style format
    string for the captured records.
    """

    def __init__(self):
        super(AutoLoggingSuiteThatFormat, self).__init__()
        # Custom record format applied to the auto-captured log.
        self.log_capture_config.format = (
            "%(asctime)-24s %(name)-50s %(levelname)-15s %(message)s"
        )

    @testcase
    def case(self, env, result):
        """The logged line appears with the custom format applied."""
        self.logger.info("Hello Formatted")


@test_plan(
    name="Logging",
    pdf_path="report.pdf",
    pdf_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    """Build the Logging MultiTest from all demo suites and add it to the plan."""
    suites = [
        LoggingSuite(),
        AutoLoggingSuite(),
        AutoLoggingSuiteThatAttach(),
        AutoLoggingSuiteThatFormat(),
    ]
    plan.add(MultiTest(name="Logging", suites=suites))


if __name__ == "__main__":
    main()