Commit 055e68c8 authored by Michael Salim's avatar Michael Salim
Browse files

Added ability to ls jobs according to state

(e.g. balsam ls --state JOB_FINISHED)

Fixed a bug (inconsistent variable naming) which caused ranks_per_node
not to be assigned correctly by the "balsam qsub" command-line tool
parent f2c3d2f5
......@@ -64,9 +64,16 @@ def make_parser():
parser_job.add_argument('--wall-minutes', type=int, required=True)
type=int, required=True)
type=int, required=True)
type=int, required=True,
help='Number of compute nodes on which to run MPI '
'job. (Total number of MPI ranks determined as '
'num_nodes * ranks_per_node).')
type=int, required=True,
help='Number of MPI ranks per compute node. '
'(Total MPI ranks calculated from num_nodes * '
'ranks_per_node. If only 1 total ranks, treated as serial '
parser_job.add_argument('--allowed-site', action='append',
required=False, default=[BALSAM_SITE],
......@@ -145,6 +152,7 @@ def make_parser():
parser_ls.add_argument('--name', help="match any substring of job name")
parser_ls.add_argument('--history', help="show state history", action='store_true')
parser_ls.add_argument('--id', help="match any substring of job id")
parser_ls.add_argument('--state', help="list jobs matching a state")
parser_ls.add_argument('--wf', help="Filter jobs matching a workflow")
parser_ls.add_argument('--verbose', action='store_true')
parser_ls.add_argument('--tree', action='store_true', help="show DAG in tree format")
......@@ -182,9 +190,9 @@ def make_parser():
parser_qsub = subparsers.add_parser('qsub', help="add a one-line job to the database, bypassing Application table")
parser_qsub.add_argument('command', nargs='+')
parser_qsub.add_argument('-n', '--nodes', type=int, default=1)
parser_qsub.add_argument('-N', '--ppn', type=int, default=1)
parser_qsub.add_argument('--name', default='')
parser_qsub.add_argument('-n', '--nodes', type=int, default=1, help="Number of compute nodes on which to run job")
parser_qsub.add_argument('-N', '--ranks-per-node', type=int, default=1, help="Number of MPI ranks per compute node")
parser_qsub.add_argument('--name', default='job')
parser_qsub.add_argument('-t', '--wall-minutes', type=int, required=True)
parser_qsub.add_argument('-d', '--threads-per-rank',type=int, default=1)
parser_qsub.add_argument('-j', '--threads-per-core',type=int, default=1)
......@@ -215,7 +223,7 @@ def make_parser():
parser_mkchild.add_argument('--wall-minutes', type=int, required=True)
type=int, required=True)
type=int, required=True)
parser_mkchild.add_argument('--allowed-site', action='append',
......@@ -285,9 +293,9 @@ def make_parser():
help="Continuously run jobs of specified workflow")
parser_launcher.add_argument('--num-workers', type=int, default=1,
help="Theta: defaults to # nodes. BGQ: the # of subblocks")
parser_launcher.add_argument('--nodes-per-worker', type=int, default=1,
help="For non-MPI jobs, how many to pack per worker")
parser_launcher.add_argument('--max-ranks-per-node', type=int, default=1)
parser_launcher.add_argument('--nodes-per-worker', type=int, default=1)
parser_launcher.add_argument('--max-ranks-per-node', type=int, default=1,
help="How many serial jobs can run concurrently per node")
parser_launcher.add_argument('--time-limit-minutes', type=float,
help="Override auto-detected walltime limit (runs"
" forever if no limit is detected or specified)")
......@@ -64,7 +64,7 @@ def newjob(args):
job.wall_time_minutes = args.wall_minutes
job.num_nodes = args.num_nodes
job.processes_per_node = args.processes_per_node
job.ranks_per_node = args.ranks_per_node
job.threads_per_rank = args.threads_per_rank
job.threads_per_core = args.threads_per_core
......@@ -117,12 +117,13 @@ def ls(args):
name =
history = args.history
verbose = args.verbose
state = args.state
id =
tree = args.tree
wf =
if objects.startswith('job'):
lscmd.ls_jobs(name, history, id, verbose, tree, wf)
lscmd.ls_jobs(name, history, id, verbose, tree, wf, state)
elif objects.startswith('app'):
lscmd.ls_apps(name, id, verbose)
elif objects.startswith('work') or objects.startswith('wf'):
......@@ -202,7 +203,7 @@ def qsub(args):
job.wall_time_minutes = args.wall_minutes
job.num_nodes = args.nodes
job.processes_per_node = args.ppn
job.ranks_per_node = args.ranks_per_node
job.threads_per_rank = args.threads_per_rank
job.threads_per_core = args.threads_per_core
job.environ_vars = ":".join(args.env)
......@@ -275,7 +276,7 @@ def make_dummies(args):
job.wall_time_minutes = 0
job.num_nodes = 1
job.processes_per_node = 1
job.ranks_per_node = 1
job.threads_per_rank = 1
job.threads_per_core = 1
job.environ_vars = ""
......@@ -30,11 +30,12 @@ def print_jobs_tree(jobs):
roots = [j for j in jobs if j.parents=='[]']
for job in roots: print_subtree(job)
def ls_jobs(namestr, show_history, jobid, verbose, tree, wf):
def ls_jobs(namestr, show_history, jobid, verbose, tree, wf, state):
results = Job.objects.all()
if namestr: results = results.filter(name__icontains=namestr)
if jobid: results = results.filter(job_id__icontains=jobid)
if wf: results = results.filter(workflow__icontains=wf)
if state: results = results.filter(state=state)
if not results:
print("No jobs found matching query")
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment