#!/bin/bash

# Load fabric profiler data collector vars.
# Every validation failure below exits with status 1 so wrappers/CI can
# detect it (a bare `exit` here used to return 0, since the preceding
# echo succeeded).

# make sure ESP_ROOT is defined
if [ -z "${ESP_ROOT+x}" ]; then
      echo "ERROR: ESP_ROOT is undefined, exiting."
      exit 1
fi
if ! [[ -f "$ESP_ROOT/config/VARS.sh" ]]; then
      echo "ERROR: VARS.sh does not exist at $ESP_ROOT/config as expected. Check ESP_ROOT.  Exiting."
      exit 1
fi
# ensure $ESP_TRACE_PATH
if [[ -z "$ESP_TRACE_PATH" ]]; then
	echo "ERROR: ESP_TRACE_PATH is undefined. Please define an *existing* location where FabPro should deposit its tracefiles when done. Exiting."
	exit 1
fi
# ensure $ESP_WORK
if [[ -z "$ESP_WORK" ]]; then
	echo "ERROR: ESP_WORK is undefined. Please define a location where FabPro should do its work. Exiting."
	exit 1
fi
# Ensure they aren't the same
if [[ "$ESP_WORK" == "$ESP_TRACE_PATH" ]]; then
    echo "ERROR: Cannot have ESP_WORK ($ESP_WORK) and ESP_TRACE_PATH ($ESP_TRACE_PATH) identical. Exiting."
    exit 1
fi

# Latch the user's paths: VARS.sh may overwrite ESP_TRACE_PATH/ESP_WORK,
# so save them here and restore (and export) them after sourcing.
esp_trace_path=$ESP_TRACE_PATH
esp_work_path=$ESP_WORK
source "$ESP_ROOT/config/VARS.sh"
export ESP_TRACE_PATH="$esp_trace_path"
export ESP_WORK="$esp_work_path"

# Provide defaults for user params (overridable via command-line flags;
# the FPRO_* values come from VARS.sh).
jobManagerSel=$FPRO_DEFAULT_JM                          # -j: pbs | slurm
resId=$FPRO_RESERVATION_ID                              # -r: PBS reservation id
cpuType=$FPRO_SLURM_CPU_TYPE                            # SLURM CPU constraint (-c flag currently disabled)
sleepTime=$FPRO_WAIT_FOR_JOB_TO_COMPLETE_DELAY          # -g: poll period in seconds
logFileSettleTime=$FPRO_LOG_SINK_TIME                   # settle time after PBS job exit, seconds
numNodes=$FPRO_NUM_NODES                                # -n
numPEsPerNode=$FPRO_NUM_PES_PER_NODE                    # -p
symmetricHeapSizeInMB=$FPRO_SYMMETRIC_HEAP_SIZE_IN_MB   # -s
forceOpenShmem=0                                        # -l: 1 = legacy OpenSHMEM (SoS) mode
cleanupTemps=1                                          # -d: 0 = keep per-run temp dir

display_help() {
  # Print the usage/help text and exit (status 0).
  # Interpolates the current defaults (from VARS.sh or command-line
  # overrides) into the option descriptions, so -h shows effective values.
  # NOTE(review): in the sample-usage lines below, the inner "R123" quotes
  # terminate and reopen the surrounding double-quoted string, so they do
  # not appear in the output — presumably intended; confirm.
  echo ""
  echo "fpro options {default}:"
  echo "	 -j  Job scheduler select (PBS for Slingshot-based clusters, SLURM for others) {$jobManagerSel}."
  echo "	 -l  'Legacy' OpenSHMEM-based operation (limited validation w/SoS) {$forceOpenShmem}."
  echo '	 -r  PBS reservation # to submit job to. Include all reservation characters (ex:"R3"). Must be supplied when -j is PBS. Ignored for SLURM.'
  echo "         -d  When set, deletes the temp dir created in WORK_DIR upon a sucessful run {$cleanupTemps}."
  echo "         -g  Sleep time; specifies update period, in seconds, for job status messages {$sleepTime}."
  echo "         -h  Display this message."
  echo "         -n  Number of nodes {$numNodes}."
  echo "         -p  Number of PEs per node {$numPEsPerNode}."
  echo "         -s  SHMEM symmetric heap size, in MB {$symmetricHeapSizeInMB MB}."
  echo " "
  echo "fpro runs the fabric profiler data collector on the specified application."
  echo "It completes the following operations:"
  echo "  1. generates a .slurm or .pbs file based on '-j' input specification"
  echo "  2. submits job with fabric profiler instrumentation enabled"
  echo "  3. waits for job completion, displaying periodic status updates until job completes"
  echo "  4. merges trace files for fabric profiler analyzer "
  echo "  5. displays run time logs"
  echo "  6. archives traces, logs, binary, and slurm/pbs files to enable later reproducers if needed"
  echo "  7. copies results and logs to signal a host if run from a remote system"
  echo " "
  echo "Pre-requisites:"
  echo "  1. ESP_ROOT must point to a valid Fabric Profiler root directory prior to running fpro"
  echo "  2. ESP_SHMEM_ROOT must point to the SHMEM runtime's install root where shared object and binaries live prior to running fpro"
  echo "  3. ESP_TRACE_PATH must point to an existing directory where Fabric Profiler will deposit its trace files."
  echo '  4. $ESP_ROOT/config/VARS.sh must contain valid fabric profiler settings' 
  echo "  5. ESP_SHMEM_LIB must define the file name of the OpenSHMEM's runtime executable. For example, 'libsma.so' " 
  echo "  6. PIN_ROOT must define the location of the Intel Pin installation (only needed for iSHMEM mode)." 
  echo "  7. ESP_ISHM_INSTALL must define the location of the Intel SHMEM installation (only needed for iSHMEM mode)." 
  echo " "
  echo "Usage Notes:"
  echo "  1. fpro defaults are controlled by VARS.sh "
  echo "  2. fpro maintains tmp directories for each run. By default they are removed, but can be preserved via '-d 0'. "
  echo "  3. fpro operates either in legacy SHMEM (SoS) or iSHMEM mode (default). This is controlled by the '-l' switch (see above)."
  echo "  4. fpro supports up to 3 command-line arguments for an application."
  echo " "
  echo " "
  echo "Sample Usage: "
  echo -e "=>To run a legacy/SHMEM app on PBS w/4 nodes, 32 PEs/node:\n  bin/collector/fpro -j pbs -r "R123" -n 4 -p 32 -l 1 ./examples/SHMEM/sanity/"
  echo -e "=>To run a legacy/SHMEM app on SLURM:\n  bin/collector/fpro -j slurm -n 4 -p 32 -l 1 ./examples/SHMEM/sanity/"
  echo -e "=>To run an iSHMEM app on PBS:\n  bin/collector/fpro -j pbs -r "R123" -n 4 -p 32 -l 0 ./examples/iSHMEM/sycl_sanity/"
  echo -e "=>To run an iSHMEM app on PBS, preserving working directory:\n  bin/collector/fpro -j pbs -r "R123" -n 4 -p 32 -l 0 -d 0 ./examples/iSHMEM/sycl_sanity/"
  echo -e "=>To run an iSHMEM app on SLURM:\n  bin/collector/fpro -j slurm -n 4 -p 32 -l 0 ./examples/iSHMEM/sycl_sanity/"
  echo " "
  exit
}

display_info() {
  # Print a banner summarizing the run configuration before submission.
  # Reads globals: host, appName, appCommandLine{,2,3}, cpuType, numNodes,
  # numPEsPerNode, jobManagerSel, exedir.
  echo ""
  echo "===="
  echo "fpro [Begin]"
  echo "     [Host Name: $host]"
  echo "     [Workload: $appName $appCommandLine $appCommandLine2 $appCommandLine3]"
  # Quoted -n test: the original unquoted `[ ! -z $cpuType ]` errored out
  # ("too many arguments") whenever cpuType contained whitespace.
  if [ -n "$cpuType" ]; then
    echo "     [CPU type: $cpuType]"
  fi
  echo "     [#Nodes: $numNodes]"
  echo "     [#PEs per node: $numPEsPerNode]"
  echo "     [Job Scheduler: $jobManagerSel]"
  echo "     [Temp Work dir: $exedir]"
  echo "===="
  echo ""
}

print_msg() {
  # Emit a "====" banner around a single status line. The %b conversion
  # honors backslash escapes in the message, matching the original
  # `echo -e` behavior.
  printf '====\nfpro [%b]\n====\n' "$1"
}


generate_pbs_script(){
  # Generate a PBS batch script at $exedir/$slurmFile.pbs that recreates the
  # fabric-profiler environment on the compute nodes and launches the app.
  # Mode comes from $forceOpenShmem: 0 = iSHMEM (Pin tool + ishmrun),
  # 1 = legacy OpenSHMEM (LD_PRELOAD shim only).
  # Reads globals: forceOpenShmem, FPRO_*, exedir, slurmFile, resId,
  #   numNodes, numPEsPerNode, logfile, appName, appCommandLine{,2,3},
  #   symmetricHeapSizeInMB, ESP_*, PIN_ROOT.
  # Sets globals: exec (launcher prefix), dst (generated script path).
  if [ "$forceOpenShmem" -eq 0 ]; then
    exec="$FPRO_SHMEM_EXEC_PBS_ISHM"
  else
    exec="$FPRO_SHMEM_EXEC_PBS"
  fi
  # NOTE(review): mode 744 leaves no group/other execute bit, so the dir is
  # not traversable by others — confirm that is intentional.
  mkdir -m 744 -p $FPRO_TMP
  mkdir -p $exedir
  dst="$exedir/$slurmFile.pbs"

  echo "===="
  echo "fpro [Generating PBS script]"
  echo "===="

  # Start writing PBS script
  echo "#!/bin/bash -l" > $dst
  # Force all envt vars to be forwarded (default)
  echo "#PBS -V" >> $dst
  # Specify the work queue to use (only when the user overrode the default)
  if [ "$resId" != "$FPRO_RESERVATION_ID" ]; then
    echo "#PBS -q $resId" >> $dst
  fi
  # Specify the machine constraint(s).  Add/edit as needed.
  echo "#PBS -l nodes=$numNodes:ppn=$numPEsPerNode" >> $dst  
  # Alternative: To specify specific machines by name:
  # echo "#PBS -l select=1:host=<machname1>:ncpus=1+1:host=<machname2>:ncpus=1"

  # specify max run time if specified
  if [ ! -z "$FPRO_PBS_MAX_TIME" ]; then
    echo "#PBS -l walltime=$FPRO_PBS_MAX_TIME" >> $dst
  fi
  # Redirect stdout & stderr
  echo "#PBS -o $logfile" >> $dst
  echo "#PBS -e $logfile.err" >> $dst
  # Load up any envt required
  echo "export ESP_ROOT=$ESP_ROOT" >>  $dst
  echo "source $ESP_ROOT/config/VARS.sh" >>  $dst
  echo "export ESP_TRACE_FILE_NAME=$appName" >> $dst
  # Provide a default trace path - edit as needed.
  echo "export ESP_TRACE_PATH=$ESP_TRACE_PATH" >> $dst
  echo "export ESP_WORK=$ESP_WORK" >> $dst
  echo "export ESP_TRACE_BUFFER_SIZE=$ESP_TRACE_BUFFER_SIZE" >> $dst
  echo "export SHMEM_OFI_PROVIDER=cxi" >> $dst
  # Note: setting threading policy here. oneSHMEM expects SHMEM_THREAD_SINGLE.
  # Other options: SHMEM_THREAD_MULTIPLE, SHMEM_THREAD_FUNNELED, SHMEM_THREAD_SERIALIZED.
  # iSHMEM Note: currently only supports single threaded model.
  # NOTE(review): this exports the *name* SHMEM_THREAD_SINGLE with no value —
  # confirm the runtime keys off the variable's presence rather than a value.
  echo "export SHMEM_THREAD_SINGLE" >> $dst
  echo "export SHMEM_SYMMETRIC_SIZE=$symmetricHeapSizeInMB""M" >> $dst
  # \$LD_LIBRARY_PATH / \$PATH are escaped so they expand at job run time,
  # not while generating the script.
  echo "export LD_LIBRARY_PATH=$ESP_SHMEM_ROOT/lib:\$LD_LIBRARY_PATH" >> $dst
  echo "export PATH=$ESP_SHMEM_ROOT/bin:\$PATH" >> $dst
  echo "export PMI_MAX_KVS_ENTRIES=1000000" >> $dst 
  # cxi workaround: remove as needed
  echo "export FI_CXI_OPTIMIZED_MRS=0" >> $dst
  if [ "$forceOpenShmem" -eq 0 ]; then
    # These are sampled by the pintool to determine the path.
    echo "export PathTo_PinTool_Wrapper=$ESP_ROOT/dev/collector/src/esp_ishmem_wrapper" >> $dst
    echo "export PathTo_FP_CBin=$ESP_ROOT/bin/collector/ishmem_shmem" >> $dst
    echo "export MYPINAPP=$exedir/$appName" >> $dst
    # Set up fpro as a shim
    echo "export LD_PRELOAD=$ESP_ROOT/bin/collector/ishmem_shmem/$ESP_LIB" >> $dst
    # Launch under Pin in probe mode (-probe), following exec'd children
    # (-follow_execv), with the esp pintool attached; ishmrun runs the app.
    echo "$exec $PIN_ROOT/pin -probe -follow_execv -t $ESP_ROOT/dev/collector/src/esp_pintool/obj-intel64/esp_pintool.so -- $ESP_ISHM_INSTALL/bin/ishmrun $exedir/$appName $appCommandLine $appCommandLine2 $appCommandLine3" >> $dst

  else    
    # Legacy path: preload the collector shim and run the app directly.
    echo "export LD_PRELOAD=$ESP_ROOT/bin/collector/shmem_only/$ESP_LIB" >> $dst
    echo "$exec $exedir/$appName $appCommandLine $appCommandLine2 $appCommandLine3" >> $dst
  fi

  # stop shimming anything that runs after the app within this job
  echo "unset LD_PRELOAD" >> $dst
}

generate_slurm_script(){
  # Generate a SLURM batch script at $exedir/$slurmFile.slurm, mirroring
  # generate_pbs_script: reconstruct the fabric-profiler environment on the
  # compute nodes, then launch the app in iSHMEM (forceOpenShmem=0) or
  # legacy OpenSHMEM (forceOpenShmem=1) mode.
  # Reads globals: FPRO_*, exedir, slurmFile, cpuType, numNodes,
  #   numPEsPerNode, logfile, appName, appCommandLine{,2,3},
  #   symmetricHeapSizeInMB, ESP_*, PIN_ROOT, forceOpenShmem.
  # Sets globals: exec (launcher prefix), dst (generated script path).
  exec="$FPRO_SHMEM_EXEC "
  mkdir -p $FPRO_TMP
  mkdir -p $exedir
  dst="$exedir/$slurmFile.slurm"

  echo "===="
  echo "fpro [Generating SLURM script: $dst]"
  echo "===="

  echo "#!/bin/bash -l" > $dst
  echo "#SBATCH --qos=$FPRO_SLURM_QUEUE" >> $dst
  echo "#SBATCH --nodes=$numNodes" >> $dst
  # FPRO_SHMEM_EXEC_OPT_NUM_PE carries the per-node task option name
  # (e.g. --ntasks-per-node) from VARS.sh.
  echo "#SBATCH $FPRO_SHMEM_EXEC_OPT_NUM_PE=$numPEsPerNode" >> $dst
  echo "#SBATCH --constraint=$FPRO_SLURM_CONSTRAINT" >> $dst
  echo "#SBATCH --licenses=$FPRO_SLURM_LICENSE" >> $dst
  
  # specify cpu type if defined
  if [ ! -z "$cpuType" ]; then
    echo "#SBATCH -C $cpuType" >> $dst
  fi
  # specify max run time if specified
  if [ ! -z "$FPRO_SLURM_MAX_TIME" ]; then
    echo "#SBATCH -t $FPRO_SLURM_MAX_TIME" >> $dst
  fi
  echo "#SBATCH -J $slurmFile" >> $dst
  echo "#SBATCH -o $logfile" >> $dst

  ## Load up any envt required
  echo "export ESP_ROOT=$ESP_ROOT" >>  $dst
  echo "source $ESP_ROOT/config/VARS.sh" >>  $dst
  echo "export ESP_TRACE_FILE_NAME=$appName" >> $dst
  # Provide a default trace path - edit as needed.
  echo "export ESP_TRACE_PATH=$ESP_TRACE_PATH" >> $dst
  echo "export ESP_WORK=$ESP_WORK" >> $dst
  echo "export ESP_TRACE_BUFFER_SIZE=$ESP_TRACE_BUFFER_SIZE" >> $dst
  echo "export SHMEM_OFI_PROVIDER=cxi" >> $dst
  # Note: setting threading policy here. oneSHMEM expects SHMEM_THREAD_SINGLE.
  # Other options: SHMEM_THREAD_MULTIPLE, SHMEM_THREAD_FUNNELED, SHMEM_THREAD_SERIALIZED.
  # iSHMEM Note: currently only supports single threaded model.
  # NOTE(review): this exports the *name* SHMEM_THREAD_SINGLE with no value —
  # confirm the runtime keys off the variable's presence rather than a value.
  echo "export SHMEM_THREAD_SINGLE" >> $dst
  echo "export SHMEM_SYMMETRIC_SIZE=$symmetricHeapSizeInMB""M" >> $dst
  # \$LD_LIBRARY_PATH / \$PATH are escaped so they expand at job run time.
  echo "export LD_LIBRARY_PATH=$ESP_SHMEM_ROOT/lib:\$LD_LIBRARY_PATH" >> $dst
  echo "export PATH=$ESP_SHMEM_ROOT/bin:\$PATH" >> $dst
  echo "export PMI_MAX_KVS_ENTRIES=1000000" >> $dst
  # cxi workaround: remove as needed
  echo "export FI_CXI_OPTIMIZED_MRS=0" >> $dst
  if [ "$forceOpenShmem" -eq 0 ]; then
    # These are sampled by the pintool to determine the path.
    echo "export PathTo_PinTool_Wrapper=$ESP_ROOT/dev/collector/src/esp_ishmem_wrapper" >> $dst
    echo "export PathTo_FP_CBin=$ESP_ROOT/bin/collector/ishmem_shmem" >> $dst
    echo "export MYPINAPP=$exedir/$appName" >> $dst

    # Set up fpro as a shim
    echo "export LD_PRELOAD=$ESP_ROOT/bin/collector/ishmem_shmem/$ESP_LIB" >> $dst
    # Launch under Pin in probe mode (-probe), following exec'd children
    # (-follow_execv), with the esp pintool attached; ishmrun runs the app.
    echo "$exec $PIN_ROOT/pin -probe -follow_execv -t $ESP_ROOT/dev/collector/src/esp_pintool/obj-intel64/esp_pintool.so -- $ESP_ISHM_INSTALL/bin/ishmrun $exedir/$appName $appCommandLine $appCommandLine2 $appCommandLine3" >> $dst

  else
    # Legacy path: preload the collector shim and run the app directly.
    echo "export LD_PRELOAD=$ESP_ROOT/bin/collector/shmem_only/$ESP_LIB" >> $dst
    echo "$exec $exedir/$appName $appCommandLine $appCommandLine2 $appCommandLine3" >> $dst
  fi
  ## stop shimming anything that runs after the app within this job
  echo "unset LD_PRELOAD" >> $dst
}

submit_job_pbs() {
  # Generate the PBS batch script, stage the app binary into the per-run
  # temp dir, and submit via qsub. Sets global `id` to the PBS job id.
  # Exits with status 1 if submission yields no job id (bare `exit`
  # previously returned 0 on this failure path).
  generate_pbs_script

  # copy current version of the app binary to tmp location
  # to allow multiple modified builds to execute
  # concurrently without interference
  cp -r "$appQual" "$exedir/."

  # submit job and get job id
  id=$(qsub "$dst")
  if [[ -z $id ]]; then
	  echo "ERROR: PBS Job Launch Failed! Exiting."
	  exit 1
  fi

  print_msg "running $appName in job $id"
}

submit_job_slurm() {
  # Generate the SLURM batch script, stage the app binary into the per-run
  # temp dir, and submit via sbatch. Sets global `id` to the job id
  # (--parsable makes sbatch print just the id).
  # Exits with status 1 on an empty job id, mirroring the PBS path which
  # already performed this check.
  generate_slurm_script

  # Copy current version of the app binary to tmp location
  # to allow multiple modified builds to execute
  # concurrently without interference
  cp -r "$appQual" "$exedir/."

  # submit job and get job id
  id=$(sbatch --parsable "$dst")
  if [[ -z $id ]]; then
	  echo "ERROR: SLURM Job Launch Failed! Exiting."
	  exit 1
  fi
  print_msg "running $appName in job $id"
}

wait_for_pbs_job_to_complete() {
  # Block until the PBS job ($id) leaves the scheduler. While qstat still
  # knows the job, assume it is running and poll every $sleepTime seconds;
  # once qstat fails, wait $logFileSettleTime seconds so log writes can
  # sink, then report completion.
  status=waiting
  while [ "$status" != "COMPLETE" ]; do
    if qstat $id &> /dev/null; then
      # scheduler still tracks our job — keep waiting
      sleep $sleepTime
    else
      # job is gone; allow some settling time for log writes to sink
      sleep $logFileSettleTime
      status="COMPLETE"
    fi
  done
  print_msg "job complete"
}

wait_for_job_to_complete() {
  # Block until the SLURM job ($id) is neither pending (PD) nor running (R).
  # `squeue -h` prints nothing once the job leaves those states, which is
  # our completion signal.
  # (Earlier sacct-based detection was dropped: sacct emitted stderr noise
  # and one COMPLETE line per node on multinode runs.)
  status=waiting
  while [ "$status" != "COMPLETE" ]; do
    local stillQueued
    stillQueued=$(squeue -j $id -h -t PD,R)
    if [[ -n $stillQueued ]]; then
      sleep $sleepTime
    else
      status="COMPLETE"
    fi
  done
  print_msg "job complete"
}

postproc_trace_files() {
  # Merge per-PE raw trace files into analyzer-ready form.
  # The collector reports its output directory in the run log as a
  # "traces: <path>" line; scrape it from there (sets global tracePath),
  # then run the three merge tools against that directory.
  # Any merge failure now exits with status 1 (a bare `exit` previously
  # returned 0 after the error echo succeeded).

  # get trace path from logfile (yuck).
  tracePath=$(awk -F'traces: ' 'NF > 1 {print $2}' "$logfile")

  # merge raw traces for analyzer
  echo "====" | tee -a "$logfile"
  echo "fpro [post-processing trace files in <$tracePath> ]" | tee -a "$logfile"

  # merge against symbols in the app version used to generate traces
  if "$ESP_ROOT/bin/collector/mergeFuncFile" "$tracePath" "$exedir/$appName" >> "$logfile"; then
	echo "INFO: mergeFuncFile complete!" | tee -a "$logfile"
  else
	echo ""
	echo "ERROR:  mergeFuncFile failed.  Exiting." | tee -a "$logfile"
	echo ""
	exit 1
  fi

  if "$ESP_ROOT/bin/collector/mergeProfileFile" "$tracePath" >> "$logfile"; then
	echo "INFO: mergeProfileFile complete!" | tee -a "$logfile"
  else
	echo "ERROR:  mergeProfileFile failed.  Exiting." | tee -a "$logfile"
	exit 1
  fi

  if "$ESP_ROOT/bin/collector/mergePutFile" "$tracePath" "$exedir/$appName" >> "$logfile"; then
	echo "INFO: mergePutFile complete!" | tee -a "$logfile"
  else
	echo "ERROR:  mergePutFile failed.  Exiting." | tee -a "$logfile"
	exit 1
  fi
  echo "====" | tee -a "$logfile"
}

view_log() {
  # Dump the application's run-time log to stdout, unless the user asked
  # for quiet mode (-q sets quietMode).
  if [ -n "$quietMode" ]; then
    return 0
  fi
  print_msg "$appName run-time log"
  cat $logfile
  echo ""
}

signal_host() {
  # Publish run artifacts (log, app binary, generated batch script) to the
  # trace directory so a remote host can collect them and the run can be
  # reproduced later. All paths are now quoted so names with spaces work.

  # copy log file to tracepath
  cp "$logfile" "$tracePath/log"

  # This next file may not exist
  cp "$logfile.err" "$tracePath/log.err" 2> /dev/null

  # copy app binary and slurm/pbs file to tracepath for an if-needed reproducer
  cp "$appQual" "$tracePath/."
  cp "$dst" "$tracePath/."
}

validate_user_input() {
  # Type-check all user-tunable parameters: string options must not look
  # like integers, numeric options must be integers. Any violation prints
  # an error and shows the help text (display_help exits the script).
  local int_re='^-?[0-9]+$'

  if [[ $jobManagerSel =~ $int_re ]]; then
    echo "ERROR: Detected Job Manager ($jobManagerSel) is an integer.  Please supply as a string."
    display_help
    exit
  fi
  if [[ ! $forceOpenShmem =~ ^(0|1)$ ]]; then
    echo "ERROR: Force Open SHMEM should 0|1."
    display_help
    exit
  fi
  if [[ $resId =~ $int_re ]]; then
    echo "ERROR: Detected Reservation ID ($resId) is an integer.  Please supply as a string."
    display_help
    exit
  fi
  if [[ $cpuType =~ $int_re ]]; then
    echo "ERROR: Detected CPU Type ($cpuType) is an integer.  Please supply as a string."
    display_help
    exit
  fi
  if [[ ! $sleepTime =~ $int_re ]]; then
    echo "ERROR: Please supply Sleep Time ($sleepTime) as an integer."
    display_help
    exit
  fi
  if [[ ! $numNodes =~ $int_re ]]; then
    echo "ERROR: Please supply # nodes ($numNodes) as an integer."
    display_help
    exit
  fi
  if [[ ! $numPEsPerNode =~ $int_re ]]; then
    echo "ERROR: Please supply # PEs per Node ($numPEsPerNode) as an integer."
    display_help
    exit
  fi
  if [[ ! $symmetricHeapSizeInMB =~ $int_re ]]; then
    echo "ERROR: Please supply Symmetric Stack Size ($symmetricHeapSizeInMB) as an integer."
    display_help
    exit
  fi
}



##############
# resume here:
##############
# Validate the SHMEM runtime environment; all failures exit with status 1
# (a bare `exit` previously returned 0 after the echo succeeded).
# ensure we know where the SHMEM runtime is.
if [[ -z "$ESP_SHMEM_ROOT" ]]; then
        echo "ERROR: Please define ESP_SHMEM_ROOT. It should point to the OpenSHMEM install directory root (tested with SoS).  Exiting."
        exit 1
fi
# ensure the SHMEM runtime (e.g. libsma.so) exists.
if ! [[ -f "$ESP_SHMEM_ROOT/lib/$ESP_SHMEM_LIB" ]]; then
	echo "ERROR: ESP_SHMEM_LIB: $ESP_SHMEM_LIB does not exist at $ESP_SHMEM_ROOT/lib as expected.  Exiting."
	exit 1
fi

# Record the host name for the run banner / signaling.
hname=$(hostname)
#host=${hname:0:4}
host="$hname"

# Parse command-line flags, overriding the VARS.sh defaults.
# Unknown flags (and -h) fall through to display_help, which exits.
while getopts j:l:d:r:c:g:hn:p:qs: flag; do
  case "${flag}" in
    j) jobManagerSel=${OPTARG} ;;
    l) forceOpenShmem=${OPTARG} ;;
    d) cleanupTemps=${OPTARG} ;;
    r) resId=${OPTARG} ;;
#   c) cpuType=${OPTARG} ;;
    g) sleepTime=${OPTARG} ;;
    h) display_help ;;
    n) numNodes=${OPTARG} ;;
    p) numPEsPerNode=${OPTARG} ;;
    q) quietMode="1" ;;
    s) symmetricHeapSizeInMB=${OPTARG} ;;
    *) display_help ;;      # default to help
  esac
done

# Type checking
validate_user_input

# Perform logical validation for PBS
if [[ $jobManagerSel == pbs && "$resId" == "$FPRO_RESERVATION_ID" ]] ; then
	echo "Warning: PBS job manager selected without a reservation ID."
fi
# For iSHMEM, we need to ensure we know where Pin is installed.
if [ "$forceOpenShmem" -eq 0 ] && [ -z "$PIN_ROOT" ] ; then
	echo "Error: Please define PIN_ROOT. It should point to the Pin installation root."
	exit 1
fi
# iSHMEM mode (-l 0) loads the collector library from ishmem_shmem/.
# (The original checked shmem_only/ here and ishmem_shmem/ for legacy mode,
# i.e. the two existence checks were swapped relative to their messages and
# to the LD_PRELOAD paths written by the script generators.)
if [[ "$forceOpenShmem" -eq 0 ]] && ! [[ -f "$ESP_ROOT/bin/collector/ishmem_shmem/$ESP_LIB" ]]; then
	echo "ERROR: ESP_LIB: $ESP_LIB does not exist at $ESP_ROOT/bin/collector/ishmem_shmem as expected.  Exiting."
	exit 1
fi
# Legacy OpenSHMEM mode (-l 1) loads the collector library from shmem_only/.
if [[ "$forceOpenShmem" -eq 1 ]] && ! [[ -f "$ESP_ROOT/bin/collector/shmem_only/$ESP_LIB" ]]; then
	echo "ERROR: ESP_LIB: $ESP_LIB does not exist at $ESP_ROOT/bin/collector/shmem_only as expected.  Exiting."
	exit 1
fi
# iSHMEM mode also needs the Intel SHMEM install path (for ishmrun).
if [[ "$forceOpenShmem" -eq 0 ]] && [[ -z "$ESP_ISHM_INSTALL" ]]; then
      echo "ERROR: ESP_ISHM_INSTALL is undefined. Please define the path to your Intel SHMEM installation. Exiting."
      exit 1
fi

#
# Advance past the parsed flags to the app name and its command line args.
#
shift $(( OPTIND - 1 ))
if [ -z "$1" ]; then
	echo "ERROR: Please supply complete command line:"
	display_help
	exit 1
fi

# Check if there are more than 3 app args (app name + 3 args = 4 words max)
if [ "$#" -gt 4 ]; then
    echo "Error: Too many application command-line arguments. Maximum allowed is 3."
    exit 1
fi

appQual=$1                      # app path exactly as supplied by the user
appPath=$(dirname "$appQual")
appName=$(basename "$appQual")
appCommandLine=$2               # up to three optional app arguments
appCommandLine2=$3
appCommandLine3=$4
slurmFile=$appName
# Per-run scratch dir, made unique by a millisecond-resolution timestamp.
exedir="$FPRO_TMP/$appName.$(date +%s%N | cut -b1-13)"
logfile="$exedir/log"

# Ensure the app exists where the user claims BEFORE submitting anything.
# (The original performed this check only after the job had already been
# submitted and run, by which point the cp/launch had silently failed.)
if [ ! -e "$appQual" ]; then
        echo ""
        echo "fpro ERROR: unknown/unspecified user application! ($appQual)"
        echo ""
        exit 1
fi

# display fpro info, then create job script and submit it
display_info

# Dispatch to the selected job manager, then block until the job finishes.
if [ "$jobManagerSel" == pbs ]; then
	submit_job_pbs
	wait_for_pbs_job_to_complete
elif [ "$jobManagerSel" == slurm ]; then
	submit_job_slurm
	wait_for_job_to_complete
else
	echo "fpro ERROR: unknown/unspecified job manager."
	display_help
	exit 1
fi

# post-process trace files for analyzer
postproc_trace_files

# show run log unless in quiet mode
view_log

# signal host that run is complete, also copy log to app trace directory
signal_host

# Clean up tmp since all files have been copied to the run directory
if [ "$cleanupTemps" -eq 1 ]; then
  print_msg "Cleaning up temp files at: $exedir"
  rm -Rf "$exedir"
  echo -e "Done. See: $tracePath for all output."
else
  print_msg "Preserving temp files at: $exedir"
fi