
Console Output

Started by timer
Running as SYSTEM
Building in workspace /var/lib/jenkins/jobs/mpi_alltoallv/workspace
[SSH] script:
NODELIST=""
PARTITION="sandybridge"
NODES="2"

hostname

. /etc/profile.d/modules.sh
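# modules.sh defines the `module` command, which is not otherwise available in a
# non-interactive SSH shell like this one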
# tar xvf /var/tmp/osub.tar  # galen's copy of the benchmark
cd osu-micro-benchmarks-5.8/mpi/collective
# test new hydro sw stack
#module use /hysw/spack/hydrocpu-2022-06/modules/lmod/Core
module list
which mpic++
#module load gcc openmpi
rm -f osu_alltoallv.o osu_alltoallv  *.txt
make clean
make osu_alltoallv

#newgrp hydro_bbma
#groups
#[1665769301.927146] [hydro01:11412:0]    ucp_context.c:735  UCX  WARN  network device 'mlx4_2:1' is not available, please use one or more of: 'enp67s0'(tcp), 'enp67s0.1676'(tcp), 'ib0'(tcp), 'mlx4_0:1'(ib), 'mlx4_1:1'(ib)
#[1665769464.099522] [hydro04:10311:0]    ucp_context.c:735  UCX  WARN  network device 'mlx4_0:1' is not available, please use one or more of: 'enp67s0'(tcp), 'enp67s0.1676'(tcp), 'ib0'(tcp), 'mlx4_1:1'(ib), 'mlx4_2:1'(ib)
#export UCX_NET_DEVICES=enp67s0
#export UCX_NET_DEVICES=mlx4_1:1,mlx4_0:1,mlx4_2:1
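# (if the UCX warnings above recur, running `ucx_info -d` on a compute node should
# list the devices/transports UCX can actually see; a diagnostic suggestion, not
# part of the test)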

# if the NODELIST parameter was filled in, run a targeted test on those nodes; graph output goes to nodelist.txt
if [[ -n ${NODELIST} ]]
then
   echo "NODELIST, NODES: ${NODELIST} , ${NODES}"
   echo "YVALUE=1" > 16c.txt
   echo "YVALUE=1" > gpu64c.txt
   # For the customized nodelist case, run twice: once for console output, once to capture the plotted value
   srun --mem=32g --nodes=${NODES} --ntasks-per-node=4 --time=00:10:00 --job-name=admin_mpi \
      --partition=${PARTITION} --nodelist=${NODELIST} \
      ./osu_alltoallv 
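   # (${NODES} nodes x 4 ranks per node; with the default NODES=2 that is 8 MPI ranks)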
   if [ $? -eq 0 ]
   then 
      echo "slurm and srun ran successfully"
   else
      echo "srun failed"
      exit 1
   fi
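   # osu_alltoallv prints one row per message size, roughly (exact field widths vary):
   #   # Size       Avg Latency(us)
   #   65536                123.45
   # the grep | sed | cut pipeline squeezes the whitespace and keeps field 2, the
   # average latency in microseconds for the 64 KiB message size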
   MYLAT=$(srun --mem=32g --nodes=${NODES} --ntasks-per-node=4 --time=00:10:00 --job-name=admin_mpi \
      --partition=${PARTITION} --nodelist=${NODELIST} \
      ./osu_alltoallv | grep 65536 | sed -r "s/\s+/ /g" | cut -d" " -f2)
   echo "YVALUE=$MYLAT" > nodelist.txt
   exit 0
fi

# otherwise, when NODELIST is "", run the default tests across the standard partitions
echo "YVALUE=1" > nodelist.txt
MYLAT=$(srun --mem=32g --nodes=2 --ntasks-per-node=4 --ntasks-per-core=1 --time=00:10:00 --job-name=mpi_small \
   --partition=sandybridge \
   ./osu_alltoallv | grep 65536 | sed -r "s/\s+/ /g" | cut -d" " -f2)
# note: without `set -o pipefail`, $? here reflects the last pipeline stage (cut), not srun itself
if [ $? -eq 0 ]
then
   echo "slurm and srun ran successfully"
else
   echo "srun failed"
   exit 1
fi
echo "YVALUE=$MYLAT" > 16c.txt

MYLAT=$(srun --mem=32g --nodes=1 --ntasks-per-node=4 --ntasks-per-core=1 --time=00:10:00 --job-name=mpi_small \
   --partition=a100 \
   ./osu_alltoallv | grep 65536 | sed -r "s/\s+/ /g" | cut -d" " -f2)
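# note: unlike the sandybridge run above, this srun's exit status is not checked, so a
# failed a100 run would silently record an empty YVALUE in gpu64c.txt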
echo "YVALUE=$MYLAT" > gpu64c.txt


[SSH] executing...
hydrol1

Currently Loaded Modules:
  1) gcc/11.3.0      3) cuda/11.7.0   5) scripts/script_paths   7) StdEnv
  2) openmpi/4.1.4   4) modtree/gpu   6) user/license_file

 

/sw/spack/hydrogpu-2022-06/apps/openmpi/4.1.4-gcc-11.3.0-i6koipj/bin/mpic++
 rm -f osu_alltoallv osu_allgatherv osu_scatterv osu_gatherv osu_reduce_scatter osu_barrier osu_reduce osu_allreduce osu_alltoall osu_bcast osu_gather osu_allgather osu_scatter osu_iallgather osu_ibcast osu_ialltoall osu_ibarrier osu_igather osu_iscatter osu_iscatterv osu_igatherv osu_iallgatherv osu_ialltoallv osu_ialltoallw osu_ireduce osu_iallreduce
rm -rf .libs _libs
rm -f *.o
rm -f ../../util/*.o
rm -f *.lo
depbase=`echo osu_alltoallv.o | sed 's|[^/]*$|.deps/&|;s|\.o$||'`;\
mpicc -DPACKAGE_NAME=\"OSU-Micro-Benchmarks\" -DPACKAGE_TARNAME=\"osu-micro-benchmarks\" -DPACKAGE_VERSION=\"5.8\" -DPACKAGE_STRING=\"OSU-Micro-Benchmarks\ 5.8\" -DPACKAGE_BUGREPORT=\"[email protected]\" -DPACKAGE_URL=\"\" -DPACKAGE=\"osu-micro-benchmarks\" -DVERSION=\"5.8\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_DLFCN_H=1 -DLT_OBJDIR=\".libs/\" -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_SYS_TIME_H=1 -DHAVE_UNISTD_H=1 -DHAVE_GETPAGESIZE=1 -DHAVE_GETTIMEOFDAY=1 -DHAVE_MEMSET=1 -DHAVE_SQRT=1 -DFIELD_WIDTH=18 -DFLOAT_PRECISION=2 -I.    -I../../util  -g -O2 -MT osu_alltoallv.o -MD -MP -MF $depbase.Tpo -c -o osu_alltoallv.o osu_alltoallv.c &&\
mv -f $depbase.Tpo $depbase.Po
depbase=`echo ../../util/osu_util.o | sed 's|[^/]*$|.deps/&|;s|\.o$||'`;\
mpicc -DPACKAGE_NAME=\"OSU-Micro-Benchmarks\" -DPACKAGE_TARNAME=\"osu-micro-benchmarks\" -DPACKAGE_VERSION=\"5.8\" -DPACKAGE_STRING=\"OSU-Micro-Benchmarks\ 5.8\" -DPACKAGE_BUGREPORT=\"[email protected]\" -DPACKAGE_URL=\"\" -DPACKAGE=\"osu-micro-benchmarks\" -DVERSION=\"5.8\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_DLFCN_H=1 -DLT_OBJDIR=\".libs/\" -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_SYS_TIME_H=1 -DHAVE_UNISTD_H=1 -DHAVE_GETPAGESIZE=1 -DHAVE_GETTIMEOFDAY=1 -DHAVE_MEMSET=1 -DHAVE_SQRT=1 -DFIELD_WIDTH=18 -DFLOAT_PRECISION=2 -I.    -I../../util  -g -O2 -MT ../../util/osu_util.o -MD -MP -MF $depbase.Tpo -c -o ../../util/osu_util.o ../../util/osu_util.c &&\
mv -f $depbase.Tpo $depbase.Po
depbase=`echo ../../util/osu_util_mpi.o | sed 's|[^/]*$|.deps/&|;s|\.o$||'`;\
mpicc -DPACKAGE_NAME=\"OSU-Micro-Benchmarks\" -DPACKAGE_TARNAME=\"osu-micro-benchmarks\" -DPACKAGE_VERSION=\"5.8\" -DPACKAGE_STRING=\"OSU-Micro-Benchmarks\ 5.8\" -DPACKAGE_BUGREPORT=\"[email protected]\" -DPACKAGE_URL=\"\" -DPACKAGE=\"osu-micro-benchmarks\" -DVERSION=\"5.8\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_DLFCN_H=1 -DLT_OBJDIR=\".libs/\" -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_SYS_TIME_H=1 -DHAVE_UNISTD_H=1 -DHAVE_GETPAGESIZE=1 -DHAVE_GETTIMEOFDAY=1 -DHAVE_MEMSET=1 -DHAVE_SQRT=1 -DFIELD_WIDTH=18 -DFLOAT_PRECISION=2 -I.    -I../../util  -g -O2 -MT ../../util/osu_util_mpi.o -MD -MP -MF $depbase.Tpo -c -o ../../util/osu_util_mpi.o ../../util/osu_util_mpi.c &&\
mv -f $depbase.Tpo $depbase.Po
/bin/sh ../../libtool  --tag=CXX   --mode=link mpic++  -g -O2   -o osu_alltoallv osu_alltoallv.o ../../util/osu_util.o ../../util/osu_util_mpi.o   -lm 
libtool: link: mpic++ -g -O2 -o osu_alltoallv osu_alltoallv.o ../../util/osu_util.o ../../util/osu_util_mpi.o  -lm
srun: job 97877 queued and waiting for resources
srun: job 97877 has been allocated resources
slurm and srun ran successfully
srun: job 97878 queued and waiting for resources
srun: job 97878 has been allocated resources

[SSH] completed
[SSH] exit-status: 0

[workspace] $ /bin/sh -xe /tmp/jenkins11732201507426899261.sh
+ scp 'HYDRO_REMOTE:osu-micro-benchmarks-5.8/mpi/collective/*.txt' /var/lib/jenkins/jobs/mpi_alltoallv/workspace
Recording plot data
Saving plot series data from: /var/lib/jenkins/jobs/mpi_alltoallv/workspace/16c.txt
Saving plot series data from: /var/lib/jenkins/jobs/mpi_alltoallv/workspace/gpu64c.txt
Saving plot series data from: /var/lib/jenkins/jobs/mpi_alltoallv/workspace/nodelist.txt
Finished: SUCCESS