test_report.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Tests for `pydarshan` package."""

import pytest

import darshan


@pytest.fixture
def response():
    """Sample pytest fixture.

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    pass


def test_metadata():
    """Sample for an expected property in counters."""

    report = darshan.DarshanReport("tests/input/sample.darshan")

    # check a metadata field
    assert report.metadata['job']['jobid'] == 4478544


def test_modules():
    """Sample for an expected number of modules."""

    report = darshan.DarshanReport("tests/input/sample.darshan")

    # check if number of modules matches
    assert len(report.modules) == 4
    assert report.modules['MPI-IO']['len'] == 154


def test_load_records():
    """Test if loaded records match."""

    report = darshan.DarshanReport("tests/input/sample.darshan")

    report.mod_read_all_records("POSIX")

    assert 1 == len(report.data['records']['POSIX'])


@pytest.mark.parametrize("unsupported_record",
        ["DXT_POSIX", "DXT_MPIIO", "LUSTRE", "APMPI", "APXC"]
        )
def test_unsupported_record_load(caplog, unsupported_record):
    # check for an appropriate logger warning when attempting
    # to load an unsupported record type
    report = darshan.DarshanReport("tests/input/sample.darshan")
    report.mod_read_all_records(mod=unsupported_record)
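    # sanity check: the loop below passes vacuously when no records are
    # captured, so first verify that a warning was actually emitted
    assert len(caplog.records) > 0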
    for record in caplog.records:
        assert 'Currently unsupported' in record.message
        assert unsupported_record in record.message


def test_internal_references():
    """
    Test that the reference ids match. This test mainly serves to make
    regressions obvious when the behavior is changed.
    """

    report = darshan.DarshanReport()

    # check that the convenience references point to the same object
    assert report.records is report.data['records']


def test_info_contents(capsys):
    # regression guard for the output from the info()
    # method of DarshanReport
    report = darshan.DarshanReport("tests/input/sample.darshan")
    report.info()
    captured = capsys.readouterr()
    expected_keys = ['Times',
                     'Executable',
                     'Processes',
                     'JobID',
                     'UID',
                     'Modules in Log',
                     'Loaded Records',
                     'Name Records',
                     'Darshan/Hints',
                     'DarshanReport']

    # nprocs, jobid, and uid reported for the sample log
    expected_values = ['2048', '4478544', '69615']
    expected_strings = expected_keys + expected_values

    for expected_string in expected_strings:
        assert expected_string in captured.out


@pytest.mark.parametrize("invalid_filepath", [
    # messy path that does not exist
    '#!$%',
    # path that exists but has no
    # actual log file
    '.',
    ]
    )
def test_report_invalid_file(invalid_filepath):
    # verify appropriate error handling for
    # provision of an invalid file path to
    # DarshanReport

    with pytest.raises(RuntimeError, match='Failed to open file'):
        darshan.DarshanReport(invalid_filepath)


def test_json_fidelity():
    # regression test for provision of appropriate
    # data by the to_json() method of the DarshanReport class
    report = darshan.DarshanReport("tests/input/sample.darshan")
    actual_json = report.to_json()
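    # spot check keys and values that the JSON dump should contain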

    for expected_key in ["version",
                         "metadata",
                         "job",
                         "uid",
                         "start_time",
                         "end_time",
                         "nprocs"]:
        assert expected_key in actual_json

    # uid, start/end timestamps, nprocs, and mounted filesystem types
    for expected_value in ['69615',
                           '1490000867',
                           '1490000983',
                           '2048',
                           'lustre',
                           'dvs',
                           'rootfs']:
        assert expected_value in actual_json