Assertions

Basic

Required files:

test_plan_basic.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows usage of assertions,
assertion groups and assertion namespaces.
"""
import sys

from testplan import test_plan
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.report.testing.styles import Style, StyleEnum

import matplotlib

matplotlib.use("agg")
import matplotlib.pyplot as plot


@testsuite
class SampleSuite:
    @testcase
    def test_log_html(self, env, result):
        result.log_html(
            """
<div style="font-size:80px;font-family:Arial;font-weight:bold;">
    <i class="fa fa-check-square" style="color:green;padding-right:5px;"></i>
    Testplan
</div>
        """,
            description="HTML example",
        )

    @testcase
    def test_basic_assertions(self, env, result):
        # Basic assertions contain equality, comparison, membership checks:
        result.equal("foo", "foo")  # The most basic syntax

        # We can pass description to any assertion method
        result.equal(1, 2, "Description for failing equality")

        result.not_equal("foo", "bar")
        result.greater(5, 2)
        result.greater_equal(2, 2)
        result.greater_equal(2, 1)
        result.less(10, 20)
        result.less_equal(10, 10)
        result.less_equal(10, 30)

        # We can access these assertions via shortcuts as well.
        # They share the same names as the functions
        # in the built-in `operator` module.
        result.eq(15, 15)
        result.ne(10, 20)
        result.lt(2, 3)
        result.gt(3, 2)
        result.le(10, 15)
        result.ge(15, 10)
        result.eq(
            "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
        )
        # We can test whether two numbers are close to each other within
        # a relative tolerance and/or a minimum absolute tolerance
        result.isclose(100, 95, 0.1, 0.0)
        result.isclose(100, 95, 0.01, 0.0)
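
        # Like `math.isclose` (which these checks mirror), the assertion
        # passes when the difference is within the relative tolerance OR
        # the absolute tolerance, so a non-zero `abs_tol` can rescue
        # comparisons near zero, e.g. (illustrative values):
        result.isclose(0.0, 1e-9, 0.1, 1e-6)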

        # `result` also has a `log` method that can be used
        # for adding extra information on the output
        result.log(
            "This is a log message, it will be displayed"
            " along with other assertion details."
        )

        result.log(
            """
Multi-line log - the first non-empty line is used as the description and truncated after 80 characters.
The second line will not appear in the description.
            """
        )

        # Boolean checks
        result.true("foo" == "foo", description="Boolean Truthiness check")
        result.false(5 < 2, description="Boolean Falseness check")

        result.fail("This is an explicit failure.")

        # Membership checks
        result.contain("foo", "foobar", description="Passing membership")
        result.not_contain(
            member=10,
            container={"a": 1, "b": 2},
            description="Failing membership",
        )

        # Slice comparison (inclusion)
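        # `equal_slices` compares only the elements covered by the given
        # slices: here indices 2-3 and 6-7 hold identical values in both
        # iterables, so the assertion passes despite the differing ends.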
        result.equal_slices(
            [1, 2, 3, 4, 5, 6, 7, 8],
            ["a", "b", 3, 4, "c", "d", 7, 8],
            slices=[slice(2, 4), slice(6, 8)],
            description="Comparison of slices",
        )

        # Slice comparison (exclusion)
        # In the example below, comparing either excluded slice on its own
        # would fail; however the overall assertion still passes, because
        # the only indices left out of both exclusion slices are [2, 3],
        # which hold the same values (`3` and `4`) in both iterables.
        result.equal_exclude_slices(
            [1, 2, 3, 4, 5, 6, 7, 8],
            ["a", "b", 3, 4, "c", "d", "e", "f"],
            slices=[slice(0, 2), slice(4, 8)],
            description="Comparison of slices (exclusion)",
        )

        # We can test whether two blocks of textual content differ, with
        # options to ignore space changes, all whitespace or blank lines
        # (`ignore_space_change`, `ignore_whitespaces`, `ignore_blank_lines`);
        # we can also specify the output delta in unified or context mode.
        result.diff("abc\nxyz\n", "abc\nxyz\n\n", ignore_blank_lines=True)
        result.diff(
            "1\r\n1\r\n1\r\nabc\r\nxy z\r\n2\r\n2\r\n2\r\n",
            "1\n1\n1\nabc \nxy\t\tz\n2\n2\n2\n",
            ignore_space_change=True,
            unified=3,
        )
        result.diff("abcde\nfghij\n", "bcdef\nghijk")

        # `result` has a `markdown` method that can be used for adding markdown
        # text in the report. Set escape=False to allow raw HTML code.
        result.markdown(
            """
<div style="font-size:80px;font-family:Arial;font-weight:bold;">
    <i class="fa fa-check-square" style="color:green;padding-right:5px;"></i>
    Testplan
</div>

Testplan is a [Python](http://python.org) package that can start a local live
environment, setup mocks, connections to services and run tests against these.
It provides:

  * ``MultiTest`` a feature extensive functional testing system with a rich set
    of *assertions* and report rendering logic.
  * Built-in inheritable drivers to create a local live *environment*.
  * Configurable, diverse and expandable test execution mechanism including
    *parallel* execution capability.
  * Test *tagging* for flexible filtering and selective execution as well as
    generation of multiple reports (for each tag combination).
  * Integration with other unit testing frameworks (like GTest).
  * Rich, unified reports (json/PDF/XML) and soon (HTML/UI).
  
# Basic Example

```python
import sys

from testplan import test_plan
from testplan.testing.multitest import MultiTest, testsuite, testcase


def multiply(numA, numB):
    return numA * numB


@testsuite
class BasicSuite(object):

    @testcase
    def basic_multiply(self, env, result):
        result.equal(multiply(2, 3), 6, description='Passing assertion')
        result.equal(multiply(2, 2), 5, description='Failing assertion')


@test_plan(name='Multiply')
def main(plan):
    test = MultiTest(name='MultiplyTest',
                     suites=[BasicSuite()])
    plan.add(test)


if __name__ == '__main__':
  sys.exit(not main())
```
        """,
            description="Markdown example",
            escape=False,
        )

        # The `log_html` method is a shortcut for the `markdown` method with
        # escaping disabled.
        result.log_html(
            """
<div style="font-size:80px;font-family:Arial;font-weight:bold;">
    <i class="fa fa-check-square" style="color:green;padding-right:5px;"></i>
    Testplan
</div>
        """,
            description="HTML example",
        )

        # `result` has a `log_code` method that can be used for adding
        # source code in the report.
        result.log_code(
            """
#include<stdio.h>

int main()
{
    return 0;
}
        """,
            language="c",
            description="C codelog example",
        )

        result.log_code(
            """
import os
print(os.uname())
        """,
            language="python",
            description="Python codelog example",
        )

        x = range(0, 10)
        y = range(0, 10)
        plot.plot(x, y)

        result.matplot(
            plot, width=2, height=2, description="Simple matplot example"
        )


@test_plan(
    name="Basic Assertions Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    plan.add(
        MultiTest(
            name="Basic Assertions Test",
            suites=[
                SampleSuite(),
            ],
        )
    )


if __name__ == "__main__":
    sys.exit(not main())
Required files:

test_plan_skip.py

#!/usr/bin/env python
"""
This example shows usage of skip assertion.
"""
import sys
from testplan import test_plan
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.report.testing.styles import Style, StyleEnum


@testsuite
class SkipSuite:
    @testcase
    def skip_me(self, env, result):
        result.true(True)
        result.skip("call skip assertion")
        result.fail("skip me")

    @testcase(parameters=tuple(range(10)))
    def condition_skip(self, env, result, num):
        if num % 2 == 0:
            result.skip("This testcase is marked as skipped")
        else:
            result.log("This is a log message")


@test_plan(
    name="Skip Assertion Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    plan.add(
        MultiTest(
            name="Skip Assertion Test",
            suites=[
                SkipSuite(),
            ],
        )
    )


if __name__ == "__main__":
    sys.exit(not main())
Required files:

test_plan_group.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows usage of assertion group.
"""
import sys
from testplan import test_plan
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.report.testing.styles import Style, StyleEnum


@testsuite
class GroupSuite:
    """
    result object has a `group` method that can be used for grouping
    assertions together. This has no effect on stdout, however it will
    be formatted with extra indentation on PDF reports for example.
    """

    @testcase
    def test_assertion_group(self, env, result):

        result.equal(1, 1, description="Equality assertion outside the group")

        with result.group(description="Custom group description") as group:
            group.not_equal(2, 3, description="Assertion within a group")
            group.greater(5, 3)

            # Groups can have sub groups as well:
            with group.group(description="This is a sub group") as sub_group:
                sub_group.less(6, 3, description="Assertion within sub group")

        result.equal(
            "foo", "foo", description="Final assertion outside all groups"
        )


@test_plan(
    name="Group Assertions Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    plan.add(
        MultiTest(
            name="Group Assertions Test",
            suites=[
                GroupSuite(),
            ],
        )
    )


if __name__ == "__main__":
    sys.exit(not main())
Required files:

test_plan_exception.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows usage of checking exception.
"""
import sys
from testplan import test_plan
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.report.testing.styles import Style, StyleEnum


@testsuite
class RaisedSuite:
    """
    `result` object has `raises` and `not_raises` methods that can be
    used as context managers to check whether a given block of code
    raises / does not raise a given exception.
    """

    @testcase
    def test_raised_exceptions(self, env, result):

        with result.raises(KeyError):
            {"foo": 3}["bar"]

        # Exception message pattern check (`re.search` is used implicitly)

        with result.raises(
            ValueError,
            pattern="foobar",
            description="Exception raised with custom pattern.",
        ):
            raise ValueError("abc foobar xyz")

        # Custom function check (func should accept
        # exception object as a single arg)

        class MyException(Exception):
            def __init__(self, value):
                self.value = value

        def custom_func(exc):
            return exc.value % 2 == 0

        with result.raises(
            MyException,
            func=custom_func,
            description="Exception raised with custom func.",
        ):
            raise MyException(4)

        # `not_raises` passes when the raised exception type
        # does not match any of the declared exception classes.
        # It is the logical inverse of `result.raises`.

        with result.not_raises(TypeError):
            {"foo": 3}["bar"]

        # `not_raises` can also check if a certain exception has been raised
        # WITHOUT matching the given `pattern` or `func`

        # Exception type matches but pattern does not -> Pass
        with result.not_raises(
            ValueError,
            pattern="foobar",
            description="Exception not raised with custom pattern.",
        ):
            raise ValueError("abc")

        # Exception type matches but func does not -> Pass
        with result.not_raises(
            MyException,
            func=custom_func,
            description="Exception not raised with custom func.",
        ):
            raise MyException(5)


@test_plan(
    name="Exception Assertions Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    plan.add(
        MultiTest(
            name="Exception Assertions Test",
            suites=[
                RaisedSuite(),
            ],
        )
    )


if __name__ == "__main__":
    sys.exit(not main())
Required files:

test_plan_dict.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows usage of dict assertion namespaces.
"""
import re
import sys
from testplan import test_plan
from testplan.common.utils import comparison
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.report.testing.styles import Style, StyleEnum


@testsuite
class DictSuite:
    """
    `result.dict` namespace can be used for applying advanced
    assertion rules to dictionaries, which can be nested.
    """

    @testcase
    def test_dict_namespace(self, env, result):

        actual = {"foo": 1, "bar": 2, "baz_excluded": 2}

        expected = {"foo": 1, "bar": 5, "baz_excluded": 5, "extra-key": 10}

        # `dict.match` (recursively) matches elements of the dictionaries
        result.dict.match(
            actual,
            expected,
            description="Simple dict match",
            exclude_keys=["baz_excluded"],
        )

        # `dict.match` supports nested data as well

        actual = {"foo": {"alpha": [1, 2, 3], "beta": {"color": "red"}}}

        expected = {"foo": {"alpha": [1, 2], "beta": {"color": "blue"}}}

        result.dict.match(actual, expected, description="Nested dict match")

        # It is possible to use custom comparators with `dict.match`
        actual = {
            "foo": [1, 2, 3],
            "bar": {"color": "blue"},
            "baz": "hello world",
        }

        expected = {
            "foo": [1, 2, lambda v: isinstance(v, int)],
            "bar": {"color": comparison.In(["blue", "red", "yellow"])},
            "baz": re.compile(r"\w+ world"),
        }

        result.dict.match(
            actual, expected, description="Dict match: Custom comparators"
        )

        # You can also specify a comparator function to apply to all values in
        # your dict. Standard comparators are available under
        # testplan.common.utils.comparison.COMPARE_FUNCTIONS but any function
        # f(x: Any, y: Any) -> bool can be used.
        actual = {"foo": 1, "bar": 2, "baz": 3}
        expected = {"foo": 1.0, "bar": 2.0, "baz": 3.0}

        result.dict.match(
            actual,
            expected,
            description="default assertion passes because the values are "
            "numerically equal",
        )
        result.dict.match(
            actual,
            expected,
            description="when we check types the assertion will fail",
            value_cmp_func=comparison.COMPARE_FUNCTIONS["check_types"],
        )

        actual = {"foo": 1.02, "bar": 2.28, "baz": 3.50}
        expected = {"foo": 0.98, "bar": 2.33, "baz": 3.46}
        result.dict.match(
            actual,
            expected,
            description="use a custom comparison function to check within a "
            "tolerance",
            value_cmp_func=lambda x, y: abs(x - y) < 0.1,
        )

        # The report_mode can be specified to limit the comparison
        # information stored. By default all comparisons are stored and added
        # to the report, but you can choose to discard some comparisons to
        # reduce the size of the report when comparing very large dicts.
        actual = {"key{}".format(i): i for i in range(10)}
        expected = actual.copy()
        expected["bad_key"] = "expected"
        actual["bad_key"] = "actual"
        result.dict.match(
            actual,
            expected,
            description="only report the failing comparison",
            report_mode=comparison.ReportOptions.FAILS_ONLY,
        )

        # `dict.check` can be used for checking existence / absence
        # of keys within a dictionary

        result.dict.check(
            dictionary={"foo": 1, "bar": 2, "baz": 3},
            has_keys=["foo", "alpha"],
            absent_keys=["bar", "beta"],
        )

        # `dict.log` can be used to log a dictionary in human readable format.

        result.dict.log(
            dictionary={
                "foo": [1, 2, 3],
                "bar": {"color": "blue"},
                "baz": "hello world",
            }
        )


@test_plan(
    name="Dict Assertions Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    plan.add(
        MultiTest(
            name="Dict Assertions Test",
            suites=[
                DictSuite(),
            ],
        )
    )


if __name__ == "__main__":
    sys.exit(not main())
Required files:

test_plan_fix.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows usage of fix assertion namespaces.
"""
import re
import sys
from testplan import test_plan
from testplan.common.utils import comparison
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.report.testing.styles import Style, StyleEnum


@testsuite
class FixSuite:
    """
    `result.fix` namespace can be used for applying advanced
    assertion rules to fix messages, which can
    be nested (e.g. repeating groups)
    For more info about FIX protocol, please see:
    https://en.wikipedia.org/wiki/Financial_Information_eXchange
    """

    @testcase
    def test_fix_namespace(self, env, result):

        # `fix.match` can compare two fix messages, and
        # supports custom comparators (like `dict.match`)

        fix_msg_1 = {
            36: 6,
            22: 5,
            55: 2,
            38: 5,
            555: [
                {
                    600: "A",
                    601: "A",
                    683: [{688: "a", 689: None}, {688: "b", 689: "b"}],
                },
                {
                    600: "B",
                    601: "B",
                    683: [{688: "c", 689: "c"}, {688: "d", 689: "d"}],
                },
            ],
        }

        fix_msg_2 = {
            36: 6,
            22: 5,
            55: 2,
            38: comparison.GreaterEqual(4),
            555: [
                {
                    600: "A",
                    601: "B",
                    683: [
                        {688: "a", 689: re.compile(r"[a-z]")},
                        {688: "b", 689: "b"},
                    ],
                },
                {
                    600: "C",
                    601: "B",
                    683: [
                        {688: "c", 689: comparison.In(("c", "d"))},
                        {688: "d", 689: "d"},
                    ],
                },
            ],
        }
        result.fix.match(fix_msg_1, fix_msg_2)

        # `fix.check` can be used for checking existence / absence
        # of certain tags in a fix message

        result.fix.check(
            msg=fix_msg_1, has_tags=[26, 22, 11], absent_tags=[444, 555]
        )

        # `fix.log` can be used to log a fix message in human readable format.

        result.fix.log(
            msg={
                36: 6,
                22: 5,
                55: 2,
                38: 5,
                555: [{556: "USD", 624: 1}, {556: "EUR", 624: 2}],
            }
        )


@test_plan(
    name="Fix Assertions Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    plan.add(
        MultiTest(
            name="Fix Assertions Test",
            suites=[
                FixSuite(),
            ],
        )
    )


if __name__ == "__main__":
    sys.exit(not main())
Required files:

test_plan_regex.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows usage of regex assertion namespaces.
"""
import os
import re
import sys
from testplan import test_plan
from testplan.common.utils import comparison
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.report.testing.styles import Style, StyleEnum


@testsuite
class RegexSuite:
    """
    `result.regex` contains methods for regular expression assertions
    """

    @testcase
    def test_regex_namespace(self, env, result):

        # `regex.match` applies `re.match` with the given `regexp` and `value`
        result.regex.match(
            regexp="foo", value="foobar", description="string pattern match"
        )

        # We can pass compiled regex pattern objects as well:
        result.regex.match(
            regexp=re.compile("foo"),
            value="foobar",
            description="re.Pattern match",
        )

        result.regex.match(
            regexp=b"\x01\w+ line",
            value=b"\x01first line \x01 \a\b\c",
            description="bytes match",
        )

        # `regex.multiline_match` implicitly passes `re.MULTILINE`
        # and `re.DOTALL` flags to `re.match`

        multiline_text = os.linesep.join(
            ["first line", "second line", "third line"]
        )

        result.regex.multiline_match(
            "first line.*second", multiline_text, description="multiline match"
        )

        # supports matching bytes data as well
        result.regex.multiline_match(
            b"first line.*second",
            multiline_text.encode(),
            description="multiline bytes match",
        )

        # `regex.not_match` returns True if the
        # given pattern does not match the value

        result.regex.not_match("baz", "foobar")

        # `regex.multiline_not_match` implicitly passes `re.MULTILINE`
        # and `re.DOTALL` flags to `re.match`

        result.regex.multiline_not_match("foobar", multiline_text)

        # `regex.search` runs pattern match via `re.search`
        result.regex.search("second", multiline_text)

        # `regex.search_empty` returns True when the given
        # pattern does not exist in the text.
        result.regex.search_empty(
            "foobar", multiline_text, description="Passing search empty"
        )

        result.regex.search_empty(
            "second", multiline_text, description="Failing search_empty"
        )

        # `regex.findall` matches all of the occurrences of the pattern
        # in the given string and optionally runs an extra condition function
        # against the number of matches
        text = "foo foo foo bar bar foo bar"

        result.regex.findall(
            regexp="foo",
            value=text,
            condition=lambda num_matches: 2 < num_matches < 5,
        )

        # Equivalent assertion with more readable output
        result.regex.findall(
            regexp="foo",
            value=text,
            condition=comparison.Greater(2) & comparison.Less(5),
        )

        # `regex.matchline` can be used for checking if a given pattern
        # matches one or more lines in the given text
        result.regex.matchline(
            regexp=re.compile(r"\w+ line$"),
            value=multiline_text,
            description="match line",
        )

        # supports matching bytes data as well
        result.regex.matchline(
            regexp=re.compile(b"\w+ line$"),
            value=multiline_text.encode(),
            description="match line bytes",
        )


@test_plan(
    name="Regex Assertions Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    plan.add(
        MultiTest(
            name="Regex Assertions Test",
            suites=[
                RegexSuite(),
            ],
        )
    )


if __name__ == "__main__":
    sys.exit(not main())
Required files:

test_plan_table.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows usage of table assertion namespaces.
"""
import re
import sys
import random
from copy import deepcopy

from testplan import test_plan
from testplan.common.utils import comparison
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.common.serialization.fields import LogLink, FormattedValue
from testplan.report.testing.styles import Style, StyleEnum


@testsuite
class TableSuite:
    """
    We can use the `result.table` namespace to apply table specific checks.
    A table is represented either as a list of dictionaries, or as a list
    of lists whose first item holds the column names and the remaining
    items hold the rows.
    """

    @testcase
    def test_table_namespace(self, env, result):

        list_of_dicts = [
            {"name": "Bob", "age": 32},
            {"name": "Susan", "age": 24},
            {"name": "Rick", "age": 67},
        ]

        list_of_lists = [
            ["name", "age"],
            ["Bob", 32],
            ["Susan", 24],
            ["Rick", 67],
        ]

        sample_table = [
            ["symbol", "amount"],
            ["AAPL", 12],
            ["GOOG", 21],
            ["FB", 32],
            ["AMZN", 5],
            ["MSFT", 42],
        ]

        large_table = [sample_table[0]] + sample_table[1:] * 100

        # We can log the table using result.table.log, either a list of dicts
        # or a list of lists

        result.table.log(list_of_dicts, description="Table Log: list of dicts")
        result.table.log(
            list_of_lists,
            display_index=True,
            description="Table Log: list of lists",
        )
        result.table.log(list_of_lists[:1], description="Empty table")
        result.table.log(
            [{"name": "Bob", "age": 32}, {"name": "Susan"}],
            description="Empty cell",
        )
        result.table.log(
            [[1, 2, 3], ["abc", "def", "xyz"]], description="Non-string header"
        )

        # When tables with over 10 rows are logged:
        #   * In the PDF report, only the first and last 5 rows are shown. The
        #     row indices are then also shown by default.
        #   * In console output the entire table is shown, without indices.
        result.table.log(large_table[:21], description="Table Log: many rows")

        # When tables are too wide:
        #   * In the PDF report, the columns are split into tables over multiple
        #     rows. The row indices are then also shown by default.
        #   * In console output the table is shown as is; if the formatting
        #     looks odd, the output can be piped into a file.
        columns = [["col_{}".format(i) for i in range(20)]]
        rows = [
            ["row {} col {}".format(i, j) for j in range(20)]
            for i in range(10)
        ]
        result.table.log(columns + rows, description="Table Log: many columns")

        # When the cell values exceed the character limit:
        #   * In the PDF report they will be truncated and appended with '...'.
        #   * In console output, should they also be truncated?
        long_cell_table = [
            ["Name", "Age", "Address"],
            ["Bob Stevens", "33", "89 Trinsdale Avenue, LONDON, E8 0XW"],
            ["Susan Evans", "21", "100 Loop Road, SWANSEA, U8 12JK"],
            ["Trevor Dune", "88", "28 Kings Lane, MANCHESTER, MT16 2YT"],
            ["Belinda Baggins", "38", "31 Prospect Hill, DOYNTON, BS30 9DN"],
            ["Cosimo Hornblower", "89", "65 Prospect Hill, SURREY, PH33 4TY"],
            ["Sabine Wurfel", "31", "88 Clasper Way, HEXWORTHY, PL20 4BG"],
        ]
        result.table.log(long_cell_table, description="Table Log: long cells")

        # Add external/internal link in the table log
        result.table.log(
            [
                ["Description", "Data"],
                [
                    "External Link",
                    LogLink(link="https://www.google.com", title="Google"),
                ],
                # Requires plan.runnable.disable_reset_report_uid() in the
                # main function, so that the report uid is the test name
                # rather than a generated uuid4 and can be used as the link.
                [
                    "Internal Link",
                    LogLink(
                        link="/Assertions%20Test/SampleSuite/test_basic_assertions",
                        title="test_basic_assertions",
                        inner=True,
                    ),
                ],
            ],
            description="Link to external/internal",
        )

        # Customize formatted value in the table log
        result.table.log(
            [
                ["Description", "Data"],
                [
                    "Formatted Value - 0.6",
                    FormattedValue(display="60%", value=0.6),
                ],
                [
                    "Formatted Value - 0.08",
                    FormattedValue(display="8%", value=0.08),
                ],
            ],
            description="Formatted value",
        )

        result.table.match(
            list_of_lists,
            list_of_lists,
            description="Table Match: list of list vs list of list",
        )

        result.table.match(
            list_of_dicts,
            list_of_dicts,
            description="Table Match: list of dict vs list of dict",
        )

        result.table.match(
            list_of_dicts,
            list_of_lists,
            description="Table Match: list of dict vs list of list",
        )

        result.table.diff(
            list_of_lists,
            list_of_lists,
            description="Table Diff: list of list vs list of list",
        )

        result.table.diff(
            list_of_dicts,
            list_of_dicts,
            description="Table Diff: list of dict vs list of dict",
        )

        result.table.diff(
            list_of_dicts,
            list_of_lists,
            description="Table Diff: list of dict vs list of list",
        )

        # For table match, Testplan allows use of custom comparators
        # (callables & regex) instead of plain value matching

        actual_table = [
            ["name", "age"],
            ["Bob", 32],
            ["Susan", 24],
            ["Rick", 67],
        ]

        expected_table = [
            ["name", "age"],
            # Regex match for row 1, name column
            # Callable match for row 1, age column
            [re.compile(r"\w{3}"), lambda age: 30 < age < 40],
            ["Susan", 24],  # simple match with exact values for row 2
            # Callable match for row 3 name column
            # Simple match for row 3 age column
            [lambda name: name in ["David", "Helen", "Pablo"], 67],
        ]

        result.table.match(
            actual_table,
            expected_table,
            description="Table Match: simple comparators",
        )

        result.table.diff(
            actual_table,
            expected_table,
            description="Table Diff: simple comparators",
        )

        # Equivalent assertion as above, using Testplan's custom comparators
        # These utilities produce more readable output

        expected_table_2 = [
            ["name", "age"],
            [
                re.compile(r"\w{3}"),
                comparison.Greater(30) & comparison.Less(40),
            ],
            ["Susan", 24],
            [comparison.In(["David", "Helen", "Pablo"]), 67],
        ]

        result.table.match(
            actual_table,
            expected_table_2,
            description="Table Match: readable comparators",
        )

        result.table.diff(
            actual_table,
            expected_table_2,
            description="Table Diff: readable comparators",
        )

        # By default a `None` value means the cell is empty; it is
        # used as a placeholder

        table = [
            ["Action", "Col1", "Col2", "Col3"],
            ["Action1", "Value1", "Value2", None],
            ["Action2", "Value1", None, "Value3"],
            ["Action3", None, "Value2", "Value3"],
        ]
        expected_table = [
            ["Action", "Col1", "Col2"],
            ["Action1", "Value1", "Value2"],
            ["Action2", "Value1", None],
            ["Action3", None, "Value2"],
        ]
        result.table.match(
            table,
            expected_table,
            description="Table Match: Empty cells",
            include_columns=["Action", "Col1", "Col2"],
        )
        result.table.diff(
            table,
            expected_table,
            description="Table Diff: Empty cells",
            exclude_columns=["Col3"],
        )

        # The match and diff can be limited to certain columns

        table = self.create_table(3, 5)
        mod_table = deepcopy(table)
        mod_table[0]["column_0"] = 123
        mod_table[1]["column_1"] = 123

        result.table.match(
            table,
            mod_table,
            include_columns=["column_1", "column_2"],
            report_all=True,
            description="Table Match: Ignored columns",
        )

        table = self.create_table(3, 5)
        mod_table = deepcopy(table)
        mod_table[0]["column_0"] = 123
        mod_table[1]["column_1"] = 123

        result.table.diff(
            table,
            mod_table,
            include_columns=["column_1", "column_2"],
            report_all=True,
            description="Table Diff: Ignored columns",
        )

        # While comparing tables with a large number of columns
        # we can 'trim' some of the columns to get more readable output

        table_with_many_columns = self.create_table(30, 10)

        # Only use 2 columns for comparison, trim the rest
        result.table.match(
            table_with_many_columns,
            table_with_many_columns,
            include_columns=["column_1", "column_2"],
            report_all=False,
            description="Table Match: Trimmed columns",
        )

        result.table.diff(
            table_with_many_columns,
            table_with_many_columns,
            include_columns=["column_1", "column_2"],
            report_all=False,
            description="Table Diff: Trimmed columns",
        )

        # While comparing tables with a large number of rows
        # we can stop comparing once the number of failed rows exceeds a limit

        matching_rows_1 = [
            {"amount": idx * 10, "product_id": random.randint(1000, 5000)}
            for idx in range(5)
        ]

        matching_rows_2 = [
            {"amount": idx * 10, "product_id": random.randint(1000, 5000)}
            for idx in range(500)
        ]

        row_diff_a = [
            {"amount": 25, "product_id": 1111},
            {"amount": 20, "product_id": 2222},
            {"amount": 50, "product_id": 3333},
        ]

        row_diff_b = [
            {"amount": 35, "product_id": 1111},
            {"amount": 20, "product_id": 1234},
            {"amount": 20, "product_id": 5432},
        ]

        table_a = matching_rows_1 + row_diff_a + matching_rows_2
        table_b = matching_rows_1 + row_diff_b + matching_rows_2

        # We can 'trim' some rows and display at most 2 rows of failures
        result.table.match(
            table_a,
            table_b,
            fail_limit=2,
            report_all=False,
            description="Table Match: Trimmed rows",
        )

        # Only display mismatching rows, with a maximum limit of 2 rows
        result.table.diff(
            table_a,
            table_b,
            fail_limit=2,
            report_all=False,
            description="Table Diff: Trimmed rows",
        )

        # result.table.column_contain can be used for checking if all of the
        # cells in a table's column exist in a given list of values
        result.table.column_contain(
            values=["AAPL", "AMZN"], table=sample_table, column="symbol"
        )

        # We can use `limit` and `report_fails_only` arguments for producing
        # less output for large tables

        result.table.column_contain(
            values=["AAPL", "AMZN"],
            table=large_table,
            column="symbol",
            limit=20,  # Process 20 items at most
            report_fails_only=True,  # Only include failures in the result
        )

    @staticmethod
    def create_table(num_cols, num_rows):
        return [
            {"column_{}".format(idx): i * idx for idx in range(num_cols)}
            for i in range(num_rows)
        ]


@test_plan(
    name="Table Assertions Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    # To preserve the internal link in the report, use the test
    # name instead of a uuid4 as the report uid.
    plan.runnable.disable_reset_report_uid()

    plan.add(
        MultiTest(
            name="Table Assertions Test",
            suites=[
                TableSuite(),
            ],
        )
    )


if __name__ == "__main__":
    sys.exit(not main())
Required files:

test_plan_xml.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows usage of xml assertion namespaces.
"""
import re
import sys
from testplan import test_plan
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.report.testing.styles import Style, StyleEnum


@testsuite
class XMLSuite:
    """
    `result.xml` namespace can be used for applying advanced assertion
    logic onto XML data.
    """

    @testcase
    def test_xml_namespace(self, env, result):

        # `xml.check` can be used for checking if given tags & XML namespaces
        # contain the expected values

        xml_1 = """
                <Root>
                    <Test>Foo</Test>
                </Root>
            """

        result.xml.check(
            element=xml_1,
            xpath="/Root/Test",
            description="Simple XML check for existence of xpath.",
        )

        xml_2 = """
                <Root>
                    <Test>Value1</Test>
                    <Test>Value2</Test>
                </Root>
            """

        result.xml.check(
            element=xml_2,
            xpath="/Root/Test",
            tags=["Value1", "Value2"],
            description="XML check for tags in the given xpath.",
        )

        xml_3 = """
                <SOAP-ENV:Envelope
                  xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
                    <SOAP-ENV:Header/>
                    <SOAP-ENV:Body>
                        <ns0:message
                          xmlns:ns0="http://testplan">Hello world!</ns0:message>
                    </SOAP-ENV:Body>
                </SOAP-ENV:Envelope>
            """

        result.xml.check(
            element=xml_3,
            xpath="//*/a:message",
            tags=[re.compile(r"Hello*")],
            namespaces={"a": "http://testplan"},
            description="XML check with namespace matching.",
        )


@test_plan(
    name="XML Assertions Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    plan.add(
        MultiTest(
            name="XML Assertions Test",
            suites=[
                XMLSuite(),
            ],
        )
    )


if __name__ == "__main__":
    sys.exit(not main())
Required files:

test_plan_custom_style.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows how to customize the style of assertion headers on the web UI.
"""

import sys

from testplan import test_plan
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.report.testing.styles import Style, StyleEnum


@testsuite
class SimpleSuite(object):
    @testcase
    def test_styled_assertions(self, env, result):
        # Basic assertion with the `custom_style` argument
        result.equal(
            "foo",
            "foo",
            description="Equality test",
            custom_style={"color": "#4A2BFF", "background-color": "#FFDDDD"},
        )
        result.log(
            "This is a example of applying custom style",
            description="Log a message",
            custom_style={"font-size": "200%", "font-style": "italic"},
        )

        # The `group` method does not accept the `custom_style` argument,
        # but the assertion methods within the group do.
        with result.group(description="Custom group description") as group:
            group.greater(
                5,
                3,
                description="Greater than",
                custom_style={"background-color": "#FFFFC4"},
            )
            group.less(
                6,
                4,
                description="Less than",
                custom_style={"background-color": "#FFFFC4"},
            )


@test_plan(
    name="Custom Styles of Assertions Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    plan.add(
        MultiTest(
            name="Custom Styles of Assertions Test",
            suites=[
                SimpleSuite(),
            ],
        )
    )


if __name__ == "__main__":
    sys.exit(not main())

Summarization

Required files:

test_plan.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
Demonstrates assertions results summarization.
Sometimes a huge list of results is neither necessary nor useful;
a report that displays X passing results and then Y results for each
failure category is often preferable.

Testplan will group summarized assertion data by category, assertion type and
pass/fail status. Certain assertion types (e.g. `fix match`) may produce
more detailed summary groups.

For example, if we have a testcase that makes use of 3 assertion types (
 equal, less, contain), the generated summary structure will be like:

Default Category Group
    equality assertions group
        passing equality assertions group
        failing equality assertions group
    less assertions group
        passing less assertions group
        failing less assertions group
    contain assertions group
        passing contain assertions group
        failing contain assertions group

For fix / dict match assertions, Testplan will generate another layer of
groups for each tag / key group:

For example, the summary entries for 100000 fix match assertions,
of which some fail on tag 35 and some on both
tags 35 and 55:

Summary 1010 entries out of 100000.

Displaying 5 passed out of 510:
  Fixmatch 1
  Fixmatch 2
  ...
  Fixmatch 5

Displaying 3 failures on key 35 (out of 240):
  Fixmatch 1
  Fixmatch 2
  Fixmatch 3

Displaying 3 failures on keys (35, 55) (out of 260):
  Fixmatch 1
  Fixmatch 2
  Fixmatch 3

If the 'category' argument is used on fix matches, then the above
summary is created for each category.
"""
import random
import sys

from testplan.testing.multitest import MultiTest, testsuite, testcase

from testplan import test_plan
from testplan.report.testing.styles import Style, StyleEnum


def new_fix(reference=False):
    """
    Returns a reference or a randomized fix message that
    will be used to generate multiple failure categories.
    """
    if reference is True:
        _38 = 6
        _55 = 2
        _688 = "a"
    else:
        choices = {
            "38": [6] * 7 + [6] * 3,
            "55": [2] * 9 + [1] * 1,
            "688": ["a"] * 9 + ["b"] * 1,
        }

        _38 = random.choice(choices["38"])
        _55 = random.choice(choices["55"])
        _688 = random.choice(choices["688"])

    return {
        36: 6,
        22: 5,
        55: _55,
        38: _38,
        555: [
            {
                600: "A",
                601: "A",
                683: [{688: _688, 689: "a"}, {688: "b", 689: "b"}],
            },
            {
                600: "B",
                601: "B",
                683: [{688: "c", 689: "c"}, {688: "d", 689: "d"}],
            },
        ],
    }


@testsuite
class AssertionsSummary:
    @testcase(summarize=True)
    def mixed_assertions_summary(self, env, result):
        """
        When we have a summarized testcase that uses different assertion
        types, we will end up with a separate group for each assertion type.

        Assertions of the same type (e.g. ``equal``, ``less``) will be
        grouped together, however separate grouping can be enabled by passing
        ``category`` argument to assertions.
        """
        for i in range(500):
            result.equal(i, i)
            result.equal(i * 2, i * 2, category="Multiples")
            result.less(i, i + 1)
            result.less(i * 2, i * 2, category="Multiples")
            result.contain(i, [i, i + 1, i + 2])
            result.contain(i, [i * 2, i * 3, i * 4], category="Multiples")

    @testcase(
        parameters=range(2),
        summarize=True,
        num_passing=2,
        num_failing=2,
        key_combs_limit=2,
    )
    def parameterized_fixmatch_summary(self, env, result, idx):
        """
        Demonstrates customizing how many passed/failed result entries
        to present in the testcase summary report, out of 1000 input assertions.
        """
        reference = new_fix(reference=True)
        for _ in range(1000):
            result.fix.match(reference, new_fix(), "Fixmatch assertion")

    # summarize=True option will use default values for the number of
    # passed/failed fixmatch results to be displayed in the testcase report.
    @testcase(summarize=True)
    def fixmatch_summary(self, env, result):
        """
        The testcase report will contain a summary of 500 passed/failed
        fix match result entries, grouped by the given input
        fix match category (Upstream or Downstream).

        Failing fix matches will also be grouped further per failing tag groups.
        """
        reference = new_fix(reference=True)
        category = random.choice(["Upstream", "Downstream"])
        for _ in range(500):
            result.fix.match(
                reference, new_fix(), "Fixmatch assertion", category=category
            )


@test_plan(
    name="Assertions Example",
    pdf_path="report.pdf",
    pdf_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    plan.add(
        MultiTest(name="AssertionsSummaryTest", suites=[AssertionsSummary()])
    )


if __name__ == "__main__":
    sys.exit(not main())

Plotly

Required files:

test_plan.py

#!/usr/bin/env python
"""
This example shows usage of chart assertions.
"""
import os
import re
import sys
import random

from testplan import test_plan
from testplan.testing.multitest import MultiTest, testsuite, testcase

import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go


@testsuite
class SampleSuite:
    @testcase
    def line_tests(self, env, result):
        """
        Example from https://plotly.com/python/line-charts/
        """
        df = px.data.gapminder().query("continent=='Oceania'")
        fig = px.line(df, x="year", y="lifeExp", color="country")
        result.plotly(fig, description="Gapminder of Oceania")

    @testcase
    def timeline_tests(self, env, result):
        """
        Example from https://plotly.com/python/gantt/
        """
        df = pd.DataFrame(
            [
                dict(
                    Task="Job A",
                    Start="2009-01-01",
                    Finish="2009-02-28",
                    Completion_pct=50,
                ),
                dict(
                    Task="Job B",
                    Start="2009-03-05",
                    Finish="2009-04-15",
                    Completion_pct=25,
                ),
                dict(
                    Task="Job C",
                    Start="2009-02-20",
                    Finish="2009-05-30",
                    Completion_pct=75,
                ),
            ]
        )

        fig = px.timeline(
            df,
            x_start="Start",
            x_end="Finish",
            y="Task",
            color="Completion_pct",
        )
        fig.update_yaxes(autorange="reversed")

        result.plotly(fig, description="Task")

    @testcase
    def bar_tests(self, env, result):
        """
        Example from https://plotly.com/python/bar-charts/
        """
        months = [
            "Jan",
            "Feb",
            "Mar",
            "Apr",
            "May",
            "Jun",
            "Jul",
            "Aug",
            "Sep",
            "Oct",
            "Nov",
            "Dec",
        ]

        fig = go.Figure()
        fig.add_trace(
            go.Bar(
                x=months,
                y=[20, 14, 25, 16, 18, 22, 19, 15, 12, 16, 14, 17],
                name="Primary Product",
                marker_color="indianred",
            )
        )
        fig.add_trace(
            go.Bar(
                x=months,
                y=[19, 14, 22, 14, 16, 19, 15, 14, 10, 12, 12, 16],
                name="Secondary Product",
                marker_color="lightsalmon",
            )
        )

        fig.update_layout(barmode="group", xaxis_tickangle=-45)
        result.plotly(fig, description="Rotated Bar Chart Labels")

    @testcase
    def pie_tests(self, env, result):
        """
        Example from https://plotly.com/python/pie-charts/
        """
        df = (
            px.data.gapminder()
            .query("year == 2007")
            .query("continent == 'Europe'")
        )
        df.loc[df["pop"] < 2.0e6, "country"] = "Other countries"
        fig = px.pie(
            df,
            values="pop",
            names="country",
            title="Population of European continent",
        )
        result.plotly(fig, description="Pie chart with plotly express")

    @testcase
    def scatter_tests(self, env, result):
        """
        Example from https://plotly.com/python/line-and-scatter/
        """
        df = px.data.iris()
        fig = px.scatter(
            df,
            x="sepal_width",
            y="sepal_length",
            color="species",
            size="petal_length",
            hover_data=["petal_width"],
        )
        result.plotly(fig, description="Set size and color with column names")

    @testcase
    def line_3d_tests(self, env, result):
        """
        Example from https://plotly.com/python/3d-line-plots/
        """
        rs = np.random.RandomState()
        rs.seed(0)

        def brownian_motion(T=1, N=100, mu=0.1, sigma=0.01, S0=20):
            dt = float(T) / N
            t = np.linspace(0, T, N)
            W = rs.standard_normal(size=N)
            W = np.cumsum(W) * np.sqrt(dt)  # standard brownian motion
            X = (mu - 0.5 * sigma**2) * t + sigma * W
            S = S0 * np.exp(X)  # geometric brownian motion
            return S

        dates = pd.date_range("2012-01-01", "2013-02-22")
        T = (dates.max() - dates.min()).days / 365
        N = dates.size
        start_price = 100
        y = brownian_motion(T, N, sigma=0.1, S0=start_price)
        z = brownian_motion(T, N, sigma=0.1, S0=start_price)

        fig = go.Figure(
            data=go.Scatter3d(
                x=dates,
                y=y,
                z=z,
                marker=dict(
                    size=4,
                    color=z,
                    colorscale="Viridis",
                ),
                line=dict(color="darkblue", width=2),
            )
        )

        fig.update_layout(
            width=800,
            height=700,
            autosize=False,
            scene=dict(
                camera=dict(
                    up=dict(x=0, y=0, z=1),
                    eye=dict(
                        x=0,
                        y=1.0707,
                        z=1,
                    ),
                ),
                aspectratio=dict(x=1, y=1, z=0.7),
                aspectmode="manual",
            ),
        )
        result.plotly(fig, description="Brownian Motion")


@test_plan(name="Charts Example")
def main(plan):
    plan.add(MultiTest(name="Chart Assertions Test", suites=[SampleSuite()]))


if __name__ == "__main__":
    sys.exit(not main())

Marking

These examples demonstrate the usage of the report_target decorator, which allows modifying the default line number and filepath that assertions carry in the report. It does so by re-pointing both to the marked frame in the call stack that is closest to the actual assertion. For example, consider a call chain where a particular testcase calls an intermediary that in turn calls a utility function holding the assertion. By default, the line number and filepath of the report entry point to the assertion itself. If the marking decorator is applied to the intermediary, the entry points to the intermediary's call of the utility. Finally, if the marking is applied to both the intermediary and the utility, the entry once again references the assertion, as the utility is the closest mark “pulling” the pointer.

Required files:

test_plan_linear.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example demonstrates the usage of the mark decorator for linear cases.
"""
import sys

from testplan import test_plan
from testplan.testing.multitest import testcase, testsuite, MultiTest
from testplan.testing.result import report_target


def helper(result):
    result.fail(description="Failure in helper.")


def intermediary(result):
    helper(result)


@report_target
def intermediary_marked(result):
    helper(result)


@testsuite(name="Example suite for linear testcases")
class Suite:
    @testcase(name="Testcase with no marking.")
    def test_unmarked(self, env, result):
        """
        Upon failure, points to assertion in helper.
        """
        helper(result)

    @testcase(name="Testcase with marked intermediary")
    def test_intermediary(self, env, result):
        """
        Upon failure, points to assertion in helper.
        """
        intermediary(result)

    @testcase(name="Testcase with marked intermediary and helper")
    def test_marked_intermediary(self, env, result):
        """
        Upon failure, points to call of helper in intermediary_marked.
        """
        intermediary_marked(result)


@test_plan(name="Plan")
def main(plan):
    plan.add(
        MultiTest(
            name="MultiTest", suites=[Suite()], testcase_report_target=False
        )
    )


if __name__ == "__main__":
    sys.exit(not main())
Required files:

test_plan_non_linear.py

#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example demonstrates the usage of the mark decorator for non-linear cases.
"""
import sys

from testplan import test_plan
from testplan.testing.multitest import testcase, testsuite, MultiTest
from testplan.testing.result import report_target


def helper(result):
    result.fail(description="Failure in helper.")


@report_target
def helper_marked(result):
    result.fail(description="Failure in marked helper.")


def intermediary(result):
    helper(result)


@report_target
def intermediary_marked(result, both):
    helper(result)
    if both:
        helper_marked(result)


@testsuite(name="Example suite")
class Suite:
    @testcase(name="Testcase with marked helper")
    def test_non_linear(self, env, result):
        """
        Non-linear test case for demonstrating various scenarios.
        """
        # Points to assertion in testcase.
        result.fail(description="Failure in testcase.")
        # Points to assertion in unmarked utility function.
        helper(result)
        # Points to assertion in unmarked utility function, not intermediary.
        intermediary(result)
        # Points to marked intermediary instead of unmarked utility function.
        intermediary_marked(result, both=False)
        # Points to marked utility function instead of marked intermediary.
        intermediary_marked(result, both=True)


@test_plan(name="Plan")
def main(plan):
    plan.add(
        MultiTest(
            name="MultiTest", suites=[Suite()], testcase_report_target=False
        )
    )


if __name__ == "__main__":
    sys.exit(not main())