#!/bin/zsh
# SLURM submission script for a RELION 3.1 MotionCorr job (CPU-only template).
# Runs relion_run_motioncorr_mpi across the allocated nodes, one MPI rank per
# socket, with one extra rank on the first node for the relion master.
#SBATCH --partition hpi,cssb,all
#SBATCH --time 7-00:00
#SBATCH --constraint Gold-6126&768G
#SBATCH --nodes 8
#SBATCH --error MotionCorr/job004/run.err
#SBATCH --output MotionCorr/job004/run.out
#SBATCH --job-name relion3s
#SBATCH --open-mode append
#SBATCH --no-requeue

# Clear any preloaded libraries inherited from the submitting environment.
export LD_PRELOAD=""

# run the job
echo "### start : $(date) ###"
echo "### master: $(hostname) ###"
echo "### jobid : ${SLURM_JOB_ID} ###"

source /etc/profile.d/modules.sh
module use /beegfs/cssb/software/modulefiles/em
module load relion/3.1-gcc8-altcpu

# check myself if the relion gpu flag is off.
# The pattern is written with escaped dashes ('\-\-gpu') so that this very
# line does not match itself when the script greps its own file ($0).
# grep exits 1 only when nothing was found; we abort on "found" (0) AND on
# grep errors (2), which is why the test is "$? -ne 1" rather than "grep -q".
grep -q -- '\-\-gpu' "$0"
if [ $? -ne 1 ]; then
    echo "Warning: This is a CPU submission script template!"
    echo "Warning: It seems that the 'Use GPU acceleration' flag in the Relion 'Compute' tab is enabled."
    echo "ERROR: The Relion gpu flag is turned on - exiting!!!"
    exit 1
fi

# detect allocated nodes
MY_NODELIST=($(scontrol show hostnames="$SLURM_NODELIST"))

# detect sockets per node
# NOTE(review): assumes likwid-topology emits a single tab-delimited
# "Sockets" line with the count in field 3 — confirm on this cluster.
MY_SOCKETS=$(/beegfs/cssb/software/admin/bin/likwid-topology | grep Sockets | cut -f3)

# generate hostfile for mpirun (assuming all nodes have same processors):
# slots = sockets per node, plus one extra slot on the first node so the
# relion master rank has somewhere to live.
echo "### MPI hostfile ###"
tmpname=MotionCorr/job004/run.out
MY_HOSTFILE=${tmpname%.out}.hostfile
rm -f "${MY_HOSTFILE}"
master=1
for mynode in "${MY_NODELIST[@]}"; do
    echo "${mynode} slots=$(( MY_SOCKETS + master ))" >>"${MY_HOSTFILE}"
    master=0
done
cat "${MY_HOSTFILE}"

# run relion
echo "### RELION ###"
mpirun --hostfile "${MY_HOSTFILE}" --map-by socket --mca opal_warn_on_missing_libcuda 0 \
    "$(which relion_run_motioncorr_mpi)" \
    --i Import/job003/movies.star \
    --o MotionCorr/job004/ \
    --first_frame_sum 1 \
    --last_frame_sum -1 \
    --use_own \
    --j 23 \
    --bin_factor 1 \
    --bfactor 150 \
    --dose_per_frame 0.75 \
    --preexposure 0 \
    --patch_x 4 \
    --patch_y 4 \
    --eer_grouping 32 \
    --dose_weighting \
    --only_do_unfinished \
    --pipeline_control MotionCorr/job004/

echo "### end: $(date) ###"