Python

unittest

Required files:

test_plan.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""Example to demonstrate PyUnit integration with Testplan."""

import sys
import unittest

from testplan import test_plan
from testplan.testing import pyunit


def before_start(env, result):
    result.log("Executing before start hook.")


def after_start(env, result):
    result.log("Executing after start hook.")


def before_stop(env, result):
    result.log("Executing before stop hook.")


def after_stop(env, result):
    result.log("Executing after stop hook.")


class TestAlpha(unittest.TestCase):
    """
    Minimal PyUnit testcase with a single trivial test method. For more
    information about the unittest library, see the [documentation](
    https://docs.python.org/3/library/unittest.html).
    """

    def test_example(self):
        """Test with basic assertions."""
        self.assertTrue(True)
        self.assertFalse(False)
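
    def test_more_assertions(self):
        """Illustrative sketch (not part of the original example) showing a
        few more common unittest assertions."""
        self.assertEqual(1 + 1, 2)
        with self.assertRaises(ValueError):
            int("not a number")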


class TestBeta(unittest.TestCase):
    """
    Minimal PyUnit testcase with a single trivial test method. For more
    information about the unittest library, see the [documentation](
    https://docs.python.org/3/library/unittest.html).
    """

    def test_fails(self):
        """Test that fails."""
        self.assertTrue(False)

    def test_raises(self):
        """Test that raises an Exception."""
        raise RuntimeError("Testcase raises")


@test_plan(name="PyUnit Example", description="Python unittest example")
def main(plan):
    # Now that we are inside a function that will be passed a plan object,
    # we can add tests to this plan. Here we will add a PyUnit test target,
    # made up of the two TestCase classes defined above.
    plan.add(
        pyunit.PyUnit(
            name="My PyUnit",
            description="PyUnit example testcase",
            testcases=[TestAlpha, TestBeta],
            before_start=before_start,
            after_start=after_start,
            before_stop=before_stop,
            after_stop=after_stop,
        )
    )


# Finally we trigger our main function when the script is run, and
# set the return status.
if __name__ == "__main__":
    res = main()
    sys.exit(res.exit_code)
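
To run the example, execute the script directly. The @test_plan decorator also installs Testplan's standard command-line options, so (assuming a standard Testplan installation) a verbose run that additionally writes a PDF report looks like:

python test_plan.py --verbose --pdf report.pdf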

pytest

Required files:

test_plan.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""Example to demonstrate PyTest integration with Testplan."""
import os
import sys

from testplan import test_plan
from testplan.common.utils.context import context
from testplan.testing import py_test
from testplan.testing.multitest.driver.tcp import TCPServer, TCPClient


def before_start(env, result):
    result.log("Executing before start hook.")


def after_start(env, result):
    result.log("Executing after start hook.")
    env.server.accept_connection()


def before_stop(env, result):
    result.log("Executing before stop hook.")


def after_stop(env, result):
    result.log("Executing after stop hook.")


# Specify the name and description of the testplan via the decorator.
@test_plan(name="PyTest Example", description="PyTest basic example")
def main(plan):
    # Since this function is decorated with `@test_plan`, the first
    # argument will be a `Testplan` instance, to which we attach our test
    # targets. Here we will add a PyTest instance which targets the tests
    # in pytest_tests.py.
    plan.add(
        py_test.PyTest(
            name="My PyTest",
            description="PyTest example - pytest basics",
            target=[
                os.path.join(os.path.dirname(__file__), "pytest_tests.py")
            ],
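            # context() defers resolution of the server's host and port
            # until runtime, once the "server" driver has started.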
            environment=[
                TCPServer(name="server", host="localhost", port=0),
                TCPClient(
                    name="client",
                    host=context("server", "{{host}}"),
                    port=context("server", "{{port}}"),
                ),
            ],
            before_start=before_start,
            after_start=after_start,
            before_stop=before_stop,
            after_stop=after_stop,
        )
    )


# Finally we trigger our main function when the script is run, and
# set the return status.
if __name__ == "__main__":
    res = main()
    sys.exit(res.exit_code)
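
This plan is run the same way as the PyUnit example. A subset of tests can be selected with Testplan's --patterns option (the pattern below assumes the usual instance-level filtering form):

python test_plan.py --patterns "My PyTest:*"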

pytest_tests.py

"""Example test script for use by PyTest."""
# For the most basic usage, no imports are required.
# pytest will automatically detect any test cases based
# on functions and methods starting with ``test_``.
import os

import pytest

from testplan.testing.result import Result


class TestPytestBasics:
    """
    Demonstrate the basic usage of PyTest. PyTest testcases can be declared
    as either plain functions or methods on a class. Testcase functions or
    method names must begin with "test" and classes containing testcases
    must begin with "Test".

    Classes containing testcases must not define an __init__() method. The
    recommended way to perform setup is to make use of Testplan's Environment -
    see the "TestWithDrivers" example below. Pytest fixtures and the older
    xunit-style setup() and teardown() methods may also be used; a short
    fixture sketch follows this class.
    """

    def test_success(self):
        """
        Trivial test method that will simply cause the test to succeed.
        Note the use of the plain Python assert statement.
        """
        assert True

    def test_failure(self):
        """
        Similar to above, except this time the test case will always fail.
        """
        print("test output")
        assert False

    @pytest.mark.parametrize("a,b,c", [(1, 2, 3), (-1, -2, -3), (0, 0, 0)])
    def test_parametrization(self, a, b, c):
        """Parametrized testcase."""
        assert a + b == c
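

# An illustrative sketch (not part of the original example): pytest fixtures
# offer per-test setup as an alternative to defining __init__() on a class.
# The fixture and function names here are hypothetical.
@pytest.fixture
def greeting():
    """Provide a value to any testcase that declares this fixture."""
    return "hello"


def test_plain_function_with_fixture(greeting):
    """Plain module-level functions are also collected by pytest."""
    assert greeting == "hello"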


class TestWithDrivers:
    """
    MultiTest drivers are also available for PyTest.
    The testcase can access those drivers by parameter `env`,
    and make assertions provided by `result`.
    """

    def test_drivers(self, env, result):
        """Testcase using server and client objects from the environment."""
        message = "This is a test message"
        env.server.accept_connection()
        size = env.client.send(bytes(message.encode("utf-8")))
        received = env.server.receive(size)
        result.log(
            "Received Message from server: {}".format(received),
            description="Log a message",
        )
        result.equal(
            received.decode("utf-8"), message, description="Expect a message"
        )


class TestWithAttachments:
    """Demonstrate attaching a file to the test report via result.attach()."""

    def test_attachment(self, result: Result):
        result.attach(__file__, "example attachment")


class TestPytestMarks:
    """
    Demonstrate the use of Pytest marks. These can be used to skip a testcase,
    or to run it but expect it to fail. Marks are useful for skipping a
    testcase in situations where it is known to fail - e.g. on a particular
    OS or Python interpreter version - or when a testcase is added before the
    feature is implemented in TDD workflows.
    """

    @pytest.mark.skip
    def test_skipped(self):
        """
        Tests can be marked as skipped and never run. Useful if the test is
        known to cause some bad side effect (e.g. crashing the python
        interpreter process).
        """
        raise RuntimeError("Testcase should not run.")

    @pytest.mark.skipif(os.name != "posix", reason="Only run on POSIX systems")
    def test_skipif(self):
        """
        Tests can be conditionally skipped - useful if a test should only be
        run on a specific platform or python interpreter version.
        """
        assert os.name == "posix"

    @pytest.mark.xfail
    def test_xfail(self):
        """
        Tests can alternatively be marked as "xfail" - expected failure. Such
        tests are run but are not reported as failures by Testplan. Useful
        for testing features still under active development, or for unstable
        tests so that you can keep running and monitoring the output without
        blocking CI builds.
        """
        raise NotImplementedError("Testcase expected to fail")

    @pytest.mark.xfail(raises=NotImplementedError)
    def test_unexpected_error(self):
        """
        Optionally, the expected exception type raised by a testcase can be
        specified. If a different exception type is raised, the testcase will
        be considered as failed. Useful to ensure that a test fails for the
        reason you actually expect it to.
        """
        raise TypeError("oops")

    @pytest.mark.xfail
    def test_xpass(self):
        """
        Tests marked as xfail that don't actually fail are considered an
        XPASS by PyTest, and Testplan considers the testcase to have passed.
        """
        assert True

    @pytest.mark.xfail(strict=True)
    def test_xpass_strict(self):
        """
        If the strict parameter is set to True, tests marked as xfail will be
        considered to have failed if they pass unexpectedly.
        """
        assert True