Commit a9ce0631 authored by Jakob Luettgau's avatar Jakob Luettgau
Browse files

PEP8.

parent 2da3a81e
......@@ -48,19 +48,25 @@ def log_get_job(log):
"""
Returns a dictionary with information about the current job.
"""
job = {}
jobrec = ffi.new("struct darshan_job *")
libdutil.darshan_log_get_job(log['handle'], jobrec)
job['uid'] = jobrec[0].uid
job['start_time'] = jobrec[0].start_time
job['end_time'] = jobrec[0].end_time
job['nprocs'] = jobrec[0].nprocs
job['jobid'] = jobrec[0].jobid
mstr = ffi.string(jobrec[0].metadata).decode("utf-8")
md = {}
for kv in mstr.split('\n')[:-1]:
k,v = kv.split('=', maxsplit=1)
md[k] = v
job['metadata'] = md
return job
......@@ -92,9 +98,11 @@ def log_get_mounts(log):
mnts = ffi.new("struct darshan_mnt_info **")
cnt = ffi.new("int *")
libdutil.darshan_log_get_mounts(log['handle'], mnts, cnt)
for i in range(0, cnt[0]):
mntlst.append((ffi.string(mnts[0][i].mnt_path).decode("utf-8"),
ffi.string(mnts[0][i].mnt_type).decode("utf-8")))
return mntlst
......@@ -111,11 +119,10 @@ def log_get_modules(log):
"""
# used cached module index if already present
# use cached module index if already present
if log['modules'] != None:
return log['modules']
modules = {}
mods = ffi.new("struct darshan_mod_info **")
......@@ -125,7 +132,6 @@ def log_get_modules(log):
modules[ffi.string(mods[0][i].name).decode("utf-8")] = \
{'len': mods[0][i].len, 'ver': mods[0][i].ver, 'idx': mods[0][i].idx}
# add to cache
log['modules'] = modules
......@@ -133,7 +139,6 @@ def log_get_modules(log):
def log_get_name_records(log):
"""
Return a dictionary resolving hash to string (typically a filepath).
......@@ -169,7 +174,7 @@ def log_get_name_records(log):
def log_get_dxt_record(log, mod_name, mod_type, reads=True, writes=True, mode='pandas'):
def log_get_dxt_record(log, mod_name, mod_type, reads=True, writes=True, mode='dict'):
"""
Returns a dictionary holding a dxt darshan log record.
......@@ -242,8 +247,10 @@ def log_get_dxt_record(log, mod_name, mod_type, reads=True, writes=True, mode='p
rec['read_segments'].append(seg)
#pd.DataFrame([rec])
if mode == "pandas":
rec['read_segments'] = pd.DataFrame(rec['read_segments'])
rec['write_segments'] = pd.DataFrame(rec['write_segments'])
return rec
......@@ -268,7 +275,6 @@ def log_get_generic_record(log, mod_name, mod_type, mode='numpy'):
>>> darshan.log_get_generic_record(log, "POSIX", "struct darshan_posix_file **")
{'counters': array([...], dtype=uint64), 'fcounters': array([...])}
"""
modules = log_get_modules(log)
......@@ -292,6 +298,11 @@ def log_get_generic_record(log, mod_name, mod_type, mode='numpy'):
flst.append(rbuf[0].fcounters[i])
rec['fcounters'] = np.array(clst, dtype=np.float64)
if mode == "pandas":
pass
return rec
......@@ -315,12 +326,15 @@ def counter_names(mod_name, fcnts=False):
names = []
i = 0
if fcnts:
F = "f_"
else:
F = ""
end = "{0}_{1}NUM_INDICES".format(mod_name.upper(), F.upper())
var_name = "{0}_{1}counter_names".format(mod_name.lower(), F.lower())
while True:
try:
var = getattr(libdutil, var_name)
......@@ -441,10 +455,13 @@ def log_get_apxc_record(log):
cluster_modes = ['unknown', 'all2all', 'quad', 'hemi', 'snc4', 'snc2']
buf = ffi.new("void **")
r = libdutil.darshan_log_get_record(log['handle'], modules['DARSHAN_APXC']['idx'], buf)
if r < 1:
return None
prf = ffi.cast("struct darshan_apxc_perf_record **", buf)
hdr = ffi.cast("struct darshan_apxc_header_record **", buf)
if hdr[0].magic == 4707761685111591494:
mm = hdr[0].memory_mode & ~(1 << 31)
cm = hdr[0].cluster_mode & ~(1 << 31)
......@@ -462,4 +479,5 @@ def log_get_apxc_record(log):
for i in range(0, len(prf[0].counters)):
clst.append(prf[0].counters[i])
rec['counters'] = np.array(clst, dtype=np.uint64)
return rec
......@@ -21,8 +21,7 @@ class DarshanVersionError(NotImplementedError):
self.version = "0.0.0"
def __repr__(self):
return "DarshanVersionError('%s')" % str(sefl)
return "DarshanVersionError('%s')" % str(self)
def __str__(self):
return "%s requires libdarshanutil >= %s, have %s" % (self.msg, self.min_version, self.version)
......@@ -64,12 +64,9 @@ def filter(self, mods=None, name_records=None, pattern=None, regex=None):
# whitelist names that match pattern
if pattern != None or regex != None:
if re.match(pattern, value):
print("YES", pattern, value)
ids.append(key)
else:
print("NO", pattern, value)
# convert filenames/name_records mix into list of ids only
if name_records != None:
......
......@@ -46,14 +46,14 @@ class DarshanReport(object):
self.filename = filename
# options
self.data_format = data_format # Experimental: preferred internal representation: numpy useful for aggregations, dict good for export/REST
# might require alternative granularity: e.g., records, vs summaries?
# vs dict/pandas? dict/native?
self.data_format = data_format # Experimental: preferred internal representation: numpy useful for aggregations, dict good for export/REST
# might require alternative granularity: e.g., records, vs summaries?
# vs dict/pandas? dict/native?
self.automatic_summary = automatic_summary
# state dependent book-keeping
self.converted_records = False # true if convert_records() was called (unnumpyfy)
self.converted_records = False # true if convert_records() was called (unnumpyfy)
#
self.start_time = float('inf')
......@@ -355,39 +355,6 @@ class DarshanReport(object):
pass
def mod_agg(self, mod, ranks=None, files=None, preserve_rank=False, preserve_file=False):
    """
    Aggregate counters for a given module name and return updated dictionary.

    Args:
        mod (str): Name of the mod to aggregate.
        ranks (int or list): Only aggregate if rank is matched
        files (int or list): Only aggregate if file is matched
        preserve_rank: do not collapse ranks into single value
        preserve_file: do not collapse files into single value

    Return:
        List of aggregated records

    NOTE(review): ranks, files, preserve_rank and preserve_file are accepted
    but never read in this body -- filtering/preservation is not implemented.
    """
    # TODO: assert
    c = None
    fc = None

    # aggregate
    # NOTE(review): `recs` and `ctx` are not defined anywhere in this method
    # or its parameters (presumably self.records and an accumulator dict were
    # intended) -- as written this raises NameError; confirm before use.
    for rec in recs[mod]:
        if mod not in ctx:
            c = rec['counters']
            # NOTE(review): likely meant rec['fcounters'] here -- verify.
            fc = rec['counters']
        else:
            c = np.add(ctx[mod], rec['counters'])
            # NOTE(review): both branches accumulate from ctx[mod]; the
            # fcounters running total probably needs its own slot -- verify.
            fc = np.add(ctx[mod], rec['fcounters'])

    # NOTE(review): key 'fcounter' (singular) is inconsistent with the
    # 'fcounters' key used by the record getters in this file -- confirm.
    return {'counters': c, 'fcounter': fc}
def info(self, metadata=False):
"""
Print information about the record for inspection.
......@@ -455,7 +422,7 @@ class DarshanReport(object):
def to_dict():
"""
Return dictionary representatino of report data.
Return dictionary representation of report data.
Args:
None
......@@ -477,7 +444,7 @@ class DarshanReport(object):
def to_json(self):
"""
Return JSON representatino of report data as string.
Return JSON representation of report data as string.
Args:
None
......@@ -494,5 +461,4 @@ class DarshanReport(object):
recs[mod][i]['counters'] = rec['counters'].tolist()
recs[mod][i]['fcounters'] = rec['fcounters'].tolist()
return json.dumps(data, cls=DarshanReportJSONEncoder)
......@@ -7,6 +7,7 @@ Subpackages
.. toctree::
darshan.backend
darshan.experimental
Submodules
----------
......
......@@ -40,10 +40,10 @@ def test_repeated_access():
log = backend.log_open("tests/input/sample.darshan")
rec1 = backend.log_get_mpiio_record(log)
rec2 = backend.log_get_mpiio_record(log)
rec = backend.log_get_mpiio_record(log)
rec = backend.log_get_mpiio_record(log) # fetch next
assert rec2 == None
assert rec is None
......
......@@ -40,10 +40,10 @@ def test_repeated_access():
log = backend.log_open("tests/input/sample.darshan")
rec1 = backend.log_get_posix_record(log)
rec2 = backend.log_get_posix_record(log)
rec = backend.log_get_posix_record(log)
rec = backend.log_get_posix_record(log) # fetch next
assert rec2 == None
assert rec is None
def test_ishouldrun():
......
......@@ -42,10 +42,10 @@ def test_repeated_access():
log = backend.log_open("tests/input/sample.darshan")
rec1 = backend.log_get_stdio_record(log)
rec2 = backend.log_get_stdio_record(log)
rec = backend.log_get_stdio_record(log)
rec = backend.log_get_stdio_record(log) # fetch next
assert rec2['counters'][3] == 68
assert rec['counters'][3] == 68
def test_ishouldrun():
......
......@@ -23,7 +23,7 @@ def test_metadata():
report = darshan.DarshanReport("tests/input/sample.darshan")
# check a metadata field
assert 4478544 == report.data['metadata']['job']['jobid']
assert 4478544 == report.metadata['job']['jobid']
def test_modules():
......@@ -32,8 +32,8 @@ def test_modules():
report = darshan.DarshanReport("tests/input/sample.darshan")
# check if number of modules matches
assert 4 == len(report.data['modules'])
assert 154 == report.data['modules']['MPI-IO']['len']
assert 4 == len(report.modules)
assert 154 == report.modules['MPI-IO']['len']
def test_load_records():
......@@ -41,17 +41,19 @@ def test_load_records():
report = darshan.DarshanReport("tests/input/sample.darshan")
report.mod_read_all_records("POSIX")
report.mod_read_all_records("POSIX")
assert 1 == len(report.data['records']['POSIX'])
def test_internal_references():
"""Test if the reference ids match. This tests mainly serves to make regressions verbose when the behavior is changed."""
"""
Test if the reference ids match. This tests mainly serves to make
regressions verbose when the behavior is changed.
"""
report = darshan.DarshanReport()
# check the convenience refs are working fine
check = id(report.records) == id(report.data['records'])
assert True == check
assert check is True
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment