Commit b9d583d0 authored by Francois Tessier

Improve the Cray XC40 topology: support multiple I/O nodes per file, add a run script for Theta, and update the Lustre API patch.

parent 482bd15f
......@@ -6,10 +6,10 @@ libtapioca.a:tapioca.o
ar rcs $@ $?
tapioca.o:tapioca.cpp
$(MPICXX) $(MPI_CFLAGS) -c $? -o $@
$(MPICXX) $(MPI_CFLAGS) -c $? -o $@ $(MPI_LDFLAGS)
install:
cp tapioca.hpp topology/*.hpp $(INSTALL_PATH)/include
cp -r tapioca.hpp topology/*.hpp topology/include/* $(INSTALL_PATH)/include
cp libtapioca.a $(INSTALL_PATH)/lib
clean:
......
MPICC = mpicxx
MPI_CFLAGS = -g -O3 -I/bgsys/drivers/ppcfloor -I/bgsys/drivers/ppcfloor/spi/include/kernel/cnk -I${HOME}/install_bgq/include/ -DBGQ
MPI_LDFLAGS = -lstdc++ -lpthread -lm -L${HOME}/install_bgq/lib/ -ltapioca
MPICXX = CC
MPI_CFLAGS = -g -O3 -I$(HOME)/install/include/ -DXC40 -DDEBUG
MPI_LDFLAGS = -L$(HOME)/install/lib/ -ltapioca -llustreapi
all: miniHACC-AoS miniHACC-SoA miniHACC-AoS-MPIIO miniHACC-SoA-MPIIO
miniHACC-AoS:miniHACC-AoS.cpp
$(MPICC) $(MPI_CFLAGS) $? -o $@ $(MPI_LDFLAGS)
$(MPICXX) $(MPI_CFLAGS) $? -o $@ $(MPI_LDFLAGS)
miniHACC-SoA:miniHACC-SoA.cpp
$(MPICC) $(MPI_CFLAGS) $? -o $@ $(MPI_LDFLAGS)
$(MPICXX) $(MPI_CFLAGS) $? -o $@ $(MPI_LDFLAGS)
### MPI I/O
miniHACC-AoS-MPIIO:miniHACC-AoS-MPIIO.cpp
$(MPICC) $(MPI_CFLAGS) $? -o $@ $(MPI_LDFLAGS)
$(MPICXX) $(MPI_CFLAGS) $? -o $@ $(MPI_LDFLAGS)
miniHACC-SoA-MPIIO:miniHACC-SoA-MPIIO.cpp
$(MPICC) $(MPI_CFLAGS) $? -o $@ $(MPI_LDFLAGS)
$(MPICXX) $(MPI_CFLAGS) $? -o $@ $(MPI_LDFLAGS)
clean:
rm miniHACC-AoS miniHACC-SoA miniHACC-AoS-MPIIO miniHACC-SoA-MPIIO *.o HACC.dat
......
......@@ -38,7 +38,8 @@ int main (int argc, char * argv[])
MPI_Comm_size(sub_comm, &sub_numtasks);
MPI_Comm_rank(sub_comm, &sub_myrank);
snprintf (output, 100, "/projects/visualization/ftessier/debug/HACC-AOS-%08d-%d.dat", mycolor, atoi(argv[1]));
//snprintf (output, 100, "/projects/visualization/ftessier/debug/HACC-AOS-%08d-%d.dat", mycolor, atoi(argv[1]));
snprintf (output, 100, "/lus/theta-fs0/projects/Performance/ftessier/HACC/HACC-AOS-%08d-%d.dat", mycolor, atoi(argv[1]));
/*****************/
/* WRITE */
......@@ -116,7 +117,7 @@ int main (int argc, char * argv[])
chunkOffset[i] = chunkOffset[i - 1] + chunkCount[i - 1] * chunkSize[i - 1];
}
tp.WriteInitialize (chunkCount, chunkSize, chunkOffset, 9, hdr, ARRAY_OF_STRUCTURES, sub_comm);
tp.WriteInitialize (output, chunkCount, chunkSize, chunkOffset, 9, hdr, ARRAY_OF_STRUCTURES, sub_comm);
/*****************/
start_time = MPI_Wtime();
......
......@@ -120,7 +120,7 @@ int main (int argc, char * argv[])
chunkOffset[i] += scan_size * chunkSize[i];
}
tp.WriteInitialize (chunkCount, chunkSize, chunkOffset, 9, hdr, STRUCTURE_OF_ARRAYS, sub_comm);
tp.WriteInitialize (output, chunkCount, chunkSize, chunkOffset, 9, hdr, STRUCTURE_OF_ARRAYS, sub_comm);
/*****************/
start_time = MPI_Wtime();
......
#!/bin/bash
VARS=""
NODES=10
PPN=16
NPROCS=$((NODES*PPN))
TARGET="/lus/theta-fs0/projects/Performance/ftessier/HACC"
DDT="/soft/debuggers/forge/bin/ddt --connect"
cd $HOME/TAPIOCA/examples/HACC-IO
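# TAPIOCA runtime settings (meanings inferred from the variable names, not
# documented here): DEVNULL=false writes real data, COMMSPLIT enables
# communicator splitting, STRATEGY=TOPOLOGY_AWARE presumably selects the
# topology-aware aggregator election (Tapioca::RankTopologyAware), NBAGGR is
# the number of aggregators, and BUFFERSIZE is the aggregation buffer size in
# bytes (16777216 = 16 MiB).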
export TAPIOCA_DEVNULL=false
export TAPIOCA_COMMSPLIT=true
export TAPIOCA_STRATEGY=TOPOLOGY_AWARE
export TAPIOCA_NBAGGR=2
export TAPIOCA_BUFFERSIZE=16777216
function updateSettings()
{
printenv | egrep "TAPIOCA_"
SETTINGS="TAPIOCA_DEVNULL=$TAPIOCA_DEVNULL"
SETTINGS="$SETTINGS TAPIOCA_COMMSPLIT=$TAPIOCA_COMMSPLIT"
SETTINGS="$SETTINGS TAPIOCA_STRATEGY=$TAPIOCA_STRATEGY"
SETTINGS="$SETTINGS TAPIOCA_NBAGGR=$TAPIOCA_NBAGGR"
SETTINGS="$SETTINGS TAPIOCA_BUFFERSIZE=$TAPIOCA_BUFFERSIZE"
}
rm $TARGET/*
updateSettings
$DDT aprun -n $NPROCS -N $PPN ./miniHACC-AoS 1
#########################
# Array of Structures
#########################
# rm $TARGET/*
# updateSettings
# runjob --block $COBALT_PARTNAME --envs $VARS $SETTINGS -p $PPN --np $NPROCS : ./miniHACC-AoS-MPIIO
# sleep 4
# let j=0
# for i in $TARGET/*
# do
# CONTROLAOS[$j]=`md5sum $i | cut -d ' ' -f1`
# echo ${CONTROLAOS[$j]}
# let j=$j+1
# done
# rm $TARGET/*
# updateSettings
# runjob --block $COBALT_PARTNAME --envs $VARS $SETTINGS -p $PPN --np $NPROCS : ./miniHACC-AoS
# sleep 4
# let j=0
# for i in $TARGET/*
# do
# HASH[$j]=`md5sum $i | cut -d ' ' -f1`
# if [ ${CONTROLAOS[$j]} == ${HASH[$j]} ]
# then
# echo -e "\e[32m[PASSED]\e[39m ${HASH[$j]}"
# else
# echo -e "\e[31m[FAILED]\e[39m ${HASH[$j]}"
# fi
# let j=$j+1
# done
# #########################
# # Structure of Arrays
# #########################
# rm $TARGET/*
# updateSettings
# runjob --block $COBALT_PARTNAME --envs $VARS $SETTINGS -p $PPN --np $NPROCS : ./miniHACC-SoA-MPIIO
# sleep 4
# let j=0
# for i in $TARGET/*
# do
# CONTROLSOA[$j]=`md5sum $i | cut -d ' ' -f1`
# echo ${CONTROLSOA[$j]}
# let j=$j+1
# done
# rm $TARGET/*
# updateSettings
# runjob --block $COBALT_PARTNAME --envs $VARS $SETTINGS -p $PPN --np $NPROCS : ./miniHACC-SoA
# sleep 4
# let j=0
# for i in $TARGET/*
# do
# HASH[$j]=`md5sum $i | cut -d ' ' -f1`
# if [ ${CONTROLSOA[$j]} == ${HASH[$j]} ]
# then
# echo -e "\e[32m[PASSED]\e[39m ${HASH[$j]}"
# else
# echo -e "\e[31m[FAILED]\e[39m ${HASH[$j]}"
# fi
# let j=$j+1
# done
MPICXX = CC
MPI_CFLAGS = -g -O3 -I./topology/ -I./topology/include/
MPI_CFLAGS += -DXC40 -llustreapi #-DDEBUG -DTIMING
MPI_CFLAGS += -DXC40 -DDEBUG #-DTIMING
MPI_LDFLAGS += -llustreapi
INSTALL_PATH = $(HOME)/install
......@@ -12,7 +12,7 @@ Tapioca::~Tapioca ()
}
void Tapioca::WriteInitialize (int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
void Tapioca::WriteInitialize (char *filename, int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm)
{
int chunk;
......@@ -21,6 +21,7 @@ void Tapioca::WriteInitialize (int64_t *chunkCount, int *chunkSize, int64_t *chu
startInitTime = MPI_Wtime();
#endif
this->filename_ = filename;
this->nChunks_ = nChunks;
this->chunksIndexMatching.resize (this->nChunks_);
......@@ -79,7 +80,7 @@ void Tapioca::WriteInitialize (int64_t *chunkCount, int *chunkSize, int64_t *chu
}
void Tapioca::ReadInitialize (int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
void Tapioca::ReadInitialize (char *filename, int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm)
{
int chunk;
......@@ -88,6 +89,7 @@ void Tapioca::ReadInitialize (int64_t *chunkCount, int *chunkSize, int64_t *chun
startInitTime = MPI_Wtime();
#endif
this->filename_ = filename;
this->nChunks_ = nChunks;
this->chunksIndexMatching.resize (this->nChunks_);
......@@ -810,9 +812,9 @@ int Tapioca::RankLongestPath (MPI_Comm aggrComm, int64_t dataSize)
int Tapioca::RankTopologyAware (MPI_Comm aggrComm, int64_t dataSize)
{
struct { double cost; int rank; } aggrCost, minCost;
int aggrCommRank, aggrCommSize, worldRank, rank, distance, dim, hops, aggrRank;
int aggrCommRank, aggrCommSize, worldRank, rank, distance, dim, hops, aggrRank, nIOnodes;
int64_t *dataDistrib, aggregatedData = 0;
int *srcCoords, *destCoords, *globalRanks;
int *srcCoords, *destCoords, *globalRanks, *IOnodesList;
MPI_Comm_rank (aggrComm, &aggrCommRank);
MPI_Comm_size (aggrComm, &aggrCommSize);
MPI_Comm_rank (MPI_COMM_WORLD, &worldRank);
......@@ -830,13 +832,24 @@ int Tapioca::RankTopologyAware (MPI_Comm aggrComm, int64_t dataSize)
if ( rank != aggrCommRank ) {
distance = topology.DistanceBetweenRanks ( globalRanks[rank], worldRank );
aggrCost.cost = std::max ( distance * LATENCY + (double)dataDistrib[rank] / BANDWIDTH,
aggrCost.cost );
//aggrCost.cost += (distance * LATENCY + (double)dataDistrib[rank] / BANDWIDTH);
// aggrCost.cost = std::max ( distance * LATENCY + (double)dataDistrib[rank] / BANDWIDTH,
// aggrCost.cost );
aggrCost.cost += (distance * LATENCY + (double)dataDistrib[rank] / BANDWIDTH);
}
}
aggrCost.cost += topology.DistanceToIONode ( worldRank ) * LATENCY + (double)aggregatedData / BANDWIDTH;
IOnodesList = (int *) malloc ( MAX_IONODES * sizeof ( int ) );
nIOnodes = topology.IONodesPerFile (this->filename_, IOnodesList);
if ( this->commRank_ == 0 ) {
fprintf (stdout, "[LUSTRE] nLnet = %d\n", nIOnodes);
fprintf (stdout, "[LUSTRE] list = ");
for ( int i = 0; i < nIOnodes; i++ )
fprintf (stdout, "%d ", IOnodesList[i]);
fprintf (stdout, "\n");
}
//aggrCost.cost += topology.DistanceToIONode ( worldRank ) * LATENCY + (double)aggregatedData / BANDWIDTH;
if ( this->excludedNode[this->intCoords_] )
aggrCost.cost = DBL_MAX;
......
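For context, the hunk above switches the election cost from a per-sender maximum to a running sum, while the distance-to-I/O-node term is commented out and the Lustre LNET list is only printed for now. A minimal sketch of the summed cost, with a hypothetical helper name and placeholder LATENCY/BANDWIDTH constants rather than the library's actual values:

#include <cstdint>

// Sketch of the accumulated cost used to elect an aggregator: each sender in
// the aggregation communicator contributes its network distance plus its data
// volume; the candidate with the smallest total is elected.
double AggregationCost (const int *distance, const int64_t *dataDistrib,
                        int aggrCommSize, int aggrCommRank,
                        double LATENCY, double BANDWIDTH)
{
  double cost = 0.0;
  for (int rank = 0; rank < aggrCommSize; rank++) {
    if (rank == aggrCommRank)
      continue;                 // local data does not cross the network
    cost += distance[rank] * LATENCY + (double) dataDistrib[rank] / BANDWIDTH;
  }
  return cost;                  // smallest value across aggrComm wins the election
}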
......@@ -53,13 +53,13 @@ class Tapioca
Tapioca ();
~Tapioca ();
void WriteInitialize (int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
void WriteInitialize (char *filename, int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm);
int Write (MPI_File fileHandle, MPI_Offset offset,
void *buf, int count, MPI_Datatype datatype,
MPI_Status *status, int64_t bufOffset = 0);
void ReadInitialize (int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
void ReadInitialize (char *filename, int64_t *chunkCount, int *chunkSize, int64_t *chunkOffset,
int nChunks, int64_t offset, MEMORY_LAYOUT layout, MPI_Comm comm);
int Read (MPI_File fileHandle, MPI_Offset offset,
void *buf, int count, MPI_Datatype datatype,
......@@ -122,6 +122,8 @@ class Tapioca
int commRank_;
int commSize_;
char *filename_;
int64_t rankDataSize_;
int64_t commDataSize_;
......
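Taken together, the declarations above mean callers now pass the target file name to WriteInitialize/ReadInitialize so TAPIOCA can look up that file's Lustre layout. A minimal, hypothetical caller (chunk layout, buffer contents and error handling are illustrative only, not taken from miniHACC):

#include <cstdint>
#include <mpi.h>
#include "tapioca.hpp"

// Sketch only: one contiguous chunk of floats per rank, written through TAPIOCA.
void write_example (MPI_Comm comm, char *output)
{
  int rank;
  MPI_Comm_rank (comm, &rank);

  const int64_t nElem = 1024;
  float buf[1024];
  for (int i = 0; i < 1024; i++) buf[i] = (float) i;

  int64_t chunkCount[1]  = { nElem };
  int     chunkSize[1]   = { (int) sizeof (float) };
  int64_t chunkOffset[1] = { rank * nElem * (int64_t) sizeof (float) };
  int64_t hdr = 0;                       // no file header in this sketch

  Tapioca tp;
  // The file name now comes first so the Lustre layout of "output" can be queried.
  tp.WriteInitialize (output, chunkCount, chunkSize, chunkOffset,
                      1, hdr, ARRAY_OF_STRUCTURES, comm);

  MPI_File fh;
  MPI_Status status;
  MPI_File_open (comm, output, MPI_MODE_WRONLY | MPI_MODE_CREATE,
                 MPI_INFO_NULL, &fh);
  tp.Write (fh, chunkOffset[0], buf, (int) nElem, MPI_FLOAT, &status);
  MPI_File_close (&fh);
}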
......@@ -13,6 +13,8 @@
#include <hwi/include/bqc/nd_500_dcr.h>
#include <mpix.h>
#define MAX_IONODES 1
class Topology: public iTopology {
public:
......
......@@ -6,8 +6,14 @@
#include "topology.hpp"
#include <pmi.h>
extern "C" {
#include <lustre/lustreapi.h>
#include <lustre/lustre_user.h>
}
#define LNETS_PER_OST 7
#define MAX_IONODES 392
class Topology: public iTopology {
public:
......@@ -40,6 +46,49 @@ public:
return ppn;
}
/*
 * For each OST the file is striped over, map the OST index to its SSU,
 * the SSU to its FGR group (o2ib...), and that group to the LNET router
 * nodes listed in /etc/lnet/routes.conf (7 LNET nodes per OST).
 */
int IONodesPerFile ( char* filename, int *nodesList ) {
int err, stripeCount, nLnets, i, idx, oid, l;
char fgrId [20];
int *ssuId, *ostId, *lnets;
struct find_param param = { 0 };
int ssu2fgr [] = { 0, 0, 0, 0,
2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9};
err = llapi_getstripe ( filename, &param );
if ( err )
fprintf ( stdout, "[ERROR] llapi_getstripe\n");
stripeCount = (&param)->fp_lmd->lmd_lmm.lmm_stripe_count;
nLnets = stripeCount * LNETS_PER_OST;
ssuId = (int *) malloc ( stripeCount * sizeof ( int ) );
ostId = (int *) malloc ( stripeCount * sizeof ( int ) );
// Warning, assumption: OSS id == SNX - 4, hence ssuId = OST index + 4
for ( i = 0; i < stripeCount; i++ ) {
idx = (&param)->fp_lmd->lmd_lmm.lmm_objects[i].l_ost_idx;
ssuId[i] = idx + 4;
lnets = (int *) malloc ( LNETS_PER_OST * sizeof ( int ) );
snprintf ( fgrId, 20, "o2ib100%d", ssu2fgr[ ssuId[i] ] );
FgrToLnets ( fgrId, lnets );
for ( l = 0; l < LNETS_PER_OST; l++ )
nodesList [ i * LNETS_PER_OST + l ] = lnets [ l ];
free ( lnets );
}
free ( ssuId );
free ( ostId );
return nLnets;
}
/**********************/
......@@ -85,9 +134,23 @@ public:
return 0;
}
/* Minimal distance between two ranks considering a dragonfly network */
int DistanceBetweenRanks ( int srcRank, int destRank ) {
return 0;
int dim = NetworkDimensions(), d;
int srcCoord[dim], destCoord[dim];
int distance;
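// Coarse estimate: the distance is the number of coordinate dimensions in which the two ranks differ.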
RankToCoordinates ( srcRank, srcCoord );
RankToCoordinates ( destRank, destCoord );
distance = 0;
for ( d = 0; d < dim; d++ ) {
if ( srcCoord[d] != destCoord[d] )
distance++;
}
return distance;
}
......@@ -106,6 +169,45 @@ public:
void LinksList ( int* linksList ) {
}
private:
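/* Parse /etc/lnet/routes.conf and collect the LNET ids routed through the given
   FGR group. The expected line shape is inferred from the parsing below (not
   verified): the FGR id, e.g. "o2ib1002", followed by a bracketed,
   comma-separated LNET list such as "[26,27,28]". */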
void FgrToLnets ( char *fgr_id, int *lnet ) {
int count = 0;
FILE *fp;
char fline[100];
char *lnet_list, *item;
fp = fopen("/etc/lnet/routes.conf", "r");
if ( fp == NULL ) {
fprintf ( stdout, "[ERROR] Error while opening routes.conf file!\n" );
return;
}
while ( fgets ( fline, 100, fp ) != NULL ) {
const char *c = strstr ( fline, fgr_id );
if ( c != NULL ) {
const char *b1 = strstr ( fline, "[" ) + 1;
const char *b2 = strstr ( fline, "]" );
lnet_list = ( char * ) malloc ( sizeof ( char ) * ( b2 - b1 + 1 ) );
strncpy ( lnet_list, b1, b2 - b1 );
lnet_list [ b2 - b1 ] = '\0'; /* strncpy does not null-terminate here */
item = strtok ( lnet_list, "," );
while ( item ) {
lnet [ count ] = atoi ( item );
item = strtok ( 0, "," );
count++;
}
free ( lnet_list );
}
count = 0;
}
fclose ( fp );
return;
}
};
#endif // CRAY_CX40_THETA_LUSTRE_H
......@@ -911,7 +911,7 @@ static inline size_t changelog_rec_offset(enum changelog_rec_flags crf)
static inline size_t changelog_rec_size(struct changelog_rec *rec)
{
return changelog_rec_offset(static_cast<changelog_rec_flags>(rec->cr_flags));
return changelog_rec_offset((changelog_rec_flags)rec->cr_flags);
}
static inline size_t changelog_rec_varsize(struct changelog_rec *rec)
......@@ -922,7 +922,7 @@ static inline size_t changelog_rec_varsize(struct changelog_rec *rec)
static inline
struct changelog_ext_rename *changelog_rec_rename(struct changelog_rec *rec)
{
enum changelog_rec_flags crf = static_cast<changelog_rec_flags>(rec->cr_flags & CLF_VERSION);
enum changelog_rec_flags crf = (changelog_rec_flags)(rec->cr_flags & CLF_VERSION);
return (struct changelog_ext_rename *)((char *)rec +
changelog_rec_offset(crf));
......@@ -932,8 +932,8 @@ struct changelog_ext_rename *changelog_rec_rename(struct changelog_rec *rec)
static inline
struct changelog_ext_jobid *changelog_rec_jobid(struct changelog_rec *rec)
{
enum changelog_rec_flags crf = static_cast<changelog_rec_flags>(rec->cr_flags &
(CLF_VERSION | CLF_RENAME));
enum changelog_rec_flags crf = (changelog_rec_flags)(rec->cr_flags &
(CLF_VERSION | CLF_RENAME));
return (struct changelog_ext_jobid *)((char *)rec +
changelog_rec_offset(crf));
......@@ -942,8 +942,8 @@ struct changelog_ext_jobid *changelog_rec_jobid(struct changelog_rec *rec)
/* The name follows the rename and jobid extensions, if present */
static inline char *changelog_rec_name(struct changelog_rec *rec)
{
return (char *)rec + changelog_rec_offset(static_cast<changelog_rec_flags>(rec->cr_flags &
CLF_SUPPORTED));
return (char *)rec + changelog_rec_offset((changelog_rec_flags)(rec->cr_flags &
CLF_SUPPORTED));
}
static inline size_t changelog_rec_snamelen(struct changelog_rec *rec)
......@@ -983,7 +983,7 @@ static inline void changelog_remap_rec(struct changelog_rec *rec,
char *jid_mov;
char *rnm_mov;
crf_wanted = static_cast<changelog_rec_flags>(crf_wanted & CLF_SUPPORTED);
crf_wanted = (changelog_rec_flags)(crf_wanted & CLF_SUPPORTED);
if ((rec->cr_flags & CLF_SUPPORTED) == crf_wanted)
return;
......@@ -994,9 +994,9 @@ static inline void changelog_remap_rec(struct changelog_rec *rec,
/* Locations of jobid and rename extensions in the remapped record */
jid_mov = (char *)rec +
changelog_rec_offset(static_cast<changelog_rec_flags>(crf_wanted & ~CLF_JOBID));
changelog_rec_offset((changelog_rec_flags)(crf_wanted & ~CLF_JOBID));
rnm_mov = (char *)rec +
changelog_rec_offset(static_cast<changelog_rec_flags>(crf_wanted & ~(CLF_JOBID | CLF_RENAME)));
changelog_rec_offset((changelog_rec_flags)(crf_wanted & ~(CLF_JOBID | CLF_RENAME)));
/* Move the extension fields to the desired positions */
if ((crf_wanted & CLF_JOBID) && (rec->cr_flags & CLF_JOBID))
......
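The regenerated patch below switches the inserted casts from static_cast to C-style casts; presumably this keeps the patched header usable from both C and C++ translation units, since static_cast exists only in C++.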
--- /usr/include/lustre/lustre_user.h 2017-02-17 03:43:53.000000000 +0000
+++ lustre_user.h 2017-03-16 14:55:10.257941000 +0000
+++ lustre_user.h 2017-03-21 19:32:19.466854000 +0000
@@ -911,7 +911,7 @@
static inline size_t changelog_rec_size(struct changelog_rec *rec)
{
- return changelog_rec_offset(rec->cr_flags);
+ return changelog_rec_offset(static_cast<changelog_rec_flags>(rec->cr_flags));
+ return changelog_rec_offset((changelog_rec_flags)rec->cr_flags);
}
static inline size_t changelog_rec_varsize(struct changelog_rec *rec)
......@@ -14,7 +14,7 @@
struct changelog_ext_rename *changelog_rec_rename(struct changelog_rec *rec)
{
- enum changelog_rec_flags crf = rec->cr_flags & CLF_VERSION;
+ enum changelog_rec_flags crf = static_cast<changelog_rec_flags>(rec->cr_flags & CLF_VERSION);
+ enum changelog_rec_flags crf = (changelog_rec_flags)(rec->cr_flags & CLF_VERSION);
return (struct changelog_ext_rename *)((char *)rec +
changelog_rec_offset(crf));
......@@ -24,8 +24,8 @@
{
- enum changelog_rec_flags crf = rec->cr_flags &
- (CLF_VERSION | CLF_RENAME);
+ enum changelog_rec_flags crf = static_cast<changelog_rec_flags>(rec->cr_flags &
+ (CLF_VERSION | CLF_RENAME));
+ enum changelog_rec_flags crf = (changelog_rec_flags)(rec->cr_flags &
+ (CLF_VERSION | CLF_RENAME));
return (struct changelog_ext_jobid *)((char *)rec +
changelog_rec_offset(crf));
......@@ -35,8 +35,8 @@
{
- return (char *)rec + changelog_rec_offset(rec->cr_flags &
- CLF_SUPPORTED);
+ return (char *)rec + changelog_rec_offset(static_cast<changelog_rec_flags>(rec->cr_flags &
+ CLF_SUPPORTED));
+ return (char *)rec + changelog_rec_offset((changelog_rec_flags)(rec->cr_flags &
+ CLF_SUPPORTED));
}
static inline size_t changelog_rec_snamelen(struct changelog_rec *rec)
......@@ -45,7 +45,7 @@
char *rnm_mov;
- crf_wanted &= CLF_SUPPORTED;
+ crf_wanted = static_cast<changelog_rec_flags>(crf_wanted & CLF_SUPPORTED);
+ crf_wanted = (changelog_rec_flags)(crf_wanted & CLF_SUPPORTED);
if ((rec->cr_flags & CLF_SUPPORTED) == crf_wanted)
return;
......@@ -54,10 +54,10 @@
/* Locations of jobid and rename extensions in the remapped record */
jid_mov = (char *)rec +
- changelog_rec_offset(crf_wanted & ~CLF_JOBID);
+ changelog_rec_offset(static_cast<changelog_rec_flags>(crf_wanted & ~CLF_JOBID));
+ changelog_rec_offset((changelog_rec_flags)(crf_wanted & ~CLF_JOBID));
rnm_mov = (char *)rec +
- changelog_rec_offset(crf_wanted & ~(CLF_JOBID | CLF_RENAME));
+ changelog_rec_offset(static_cast<changelog_rec_flags>(crf_wanted & ~(CLF_JOBID | CLF_RENAME)));
+ changelog_rec_offset((changelog_rec_flags)(crf_wanted & ~(CLF_JOBID | CLF_RENAME)));
/* Move the extension fields to the desired positions */
if ((crf_wanted & CLF_JOBID) && (rec->cr_flags & CLF_JOBID))