diff --git a/run_pipeline.sh b/run_pipeline.sh
index fa9d87f1643a9e8d96c1bd58319f54acd4ecf8ce..e0285127a8db4fb84dc0d2b57588bad098d2c7de 100755
--- a/run_pipeline.sh
+++ b/run_pipeline.sh
@@ -114,7 +114,7 @@ megabytes_resource=$(echo "${kilobytes_tot} / 1100" | bc)
 from_dir=$(pwd)
 #cmd="(cd ${output_dir}; snakemake -s ${snakefile} --configfile ${config_base} --resources mem_mb=${megabytes_resource} $@)"
 # TODO: check that this works
-cmd="(cd ${output_dir}; echo ${USER}@${HOSTNAME}:${from_dir}/${output_dir} > pwd.txt; snakemake -s ${snakefile_base} --configfile ${config_base} --rulegraph | dot -Tpdf > rulegraph.pdf; ${SBATCH} snakemake -s ${snakefile_base} --configfile ${config_base} --resources mem_mb=${megabytes_resource} $@)"
+cmd="(cd ${output_dir}; echo ${USER}@${HOSTNAME}:${from_dir}/${output_dir} > pwd.txt; snakemake -s ${snakefile_base} --configfile ${config_base} --rulegraph | dot -Tpdf > rulegraph.pdf; snakemake -s ${snakefile_base} --configfile ${config_base} --resources mem_mb=${megabytes_resource} $@)"
 
 echo ${cmd} | tee -a ${log_base}.log
 # https://unix.stackexchange.com/a/245610/55127
diff --git a/singularity/run_pipeline.def b/singularity/run_pipeline.def
index f11d0ec3b313ec5cdb3636266017f40f09da0342..654eb6910c07ee37bac904a20e1c2a6730a18ed3 100644
--- a/singularity/run_pipeline.def
+++ b/singularity/run_pipeline.def
@@ -58,7 +58,13 @@ From:workflows_base.sif
 	/usr/local/share/doc/qaf_demux_version.txt
 
 %post
-
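+	# fakeroot plus the runtime libraries (munge, hwloc, libevent, PMIx) that the
+	# bind-mounted Slurm client tools from the host are expected to need.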
+	apt-get -y install fakeroot
+	apt-get -y install munge libmunge2
+	apt-get -y install hwloc libhwloc5
+	apt-get -y install libevent-2.1-6
+	apt-get -y install libpmix2
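+	# Mount point for the host's Slurm tree, plus a slurm user entry (uid/gid assumed
+	# to match the host's slurm account) so the bind-mounted binaries can resolve it.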
+	mkdir -p /opt/hpc/slurm
+	echo slurm:x:497:493::/home/slurm:/sbin/nologin >> /etc/passwd
 	cat /usr/local/share/doc/qaf_demux_version.txt >> /usr/local/share/doc/program_versions.txt
 	# To use the "local" python, not the system one.
 	export PATH="/usr/local/bin":$PATH
@@ -101,6 +107,7 @@ From:workflows_base.sif
 	# To avoid using python things installed in the HOME of the user
 	# (that will be mounted during container execution)
 	export PYTHONNOUSERSITE=1
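+	# Slurm client binaries bind-mounted from the host (see -B /opt/hpc/slurm in singularity/run_pipeline.sh).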
+	export PATH=/opt/hpc/slurm/current/bin:"${PATH}"
 	export PATH=/usr/local/src/bioinfo_utils:"${PATH}"
 	export PATH=/usr/local/src/bioinfo_utils/Genome_preparation:"${PATH}"
 
diff --git a/singularity/run_pipeline.sh b/singularity/run_pipeline.sh
index a08914e1d620385fa423b531fa3d086fd9359a85..653e90323c32ec45835346aa26fae58632fe79b5 100755
--- a/singularity/run_pipeline.sh
+++ b/singularity/run_pipeline.sh
@@ -66,7 +66,7 @@ then
     module --version 2> /dev/null && have_modules=1
     if [ ${have_modules} ]
     then
-        module load singularity || error_exit "singularity is needed to run the pipelines (see ${install_doc})"
+        module load apptainer || error_exit "apptainer (or singularity) is needed to run the pipelines (see ${install_doc})"
     else
         error_exit "singularity is needed to run the pipelines (see ${install_doc})"
     fi
@@ -83,14 +83,25 @@ then
     fi
 fi
 
-genome_dir="/pasteur/entites/Mhe/Genomes"
-gene_lists_dir="/pasteur/entites/Mhe/Gene_lists"
+# This should actually be taken from the pipeline config file.
+[[ ${GENOME_DIR} ]] || GENOME_DIR="/pasteur/entites/Mhe/Genomes"
+[[ ${GENE_LISTS_DIR} ]] || GENE_LISTS_DIR="/pasteur/entites/Mhe/Gene_lists"
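+# Both defaults can be overridden from the calling environment (GENOME_DIR / GENE_LISTS_DIR).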
 
-if [ ! -e ${genome_dir} -o ! -e ${gene_lists_dir} ]
+if [ ! -e ${GENOME_DIR} -o ! -e ${GENE_LISTS_DIR} ]
 then
-    error_exit "The pipelines will look for genome data in ${genome_dir} and gene lists in ${gene_lists_dir}. Make sure it's there."
+    error_exit "The pipelines will look for genome data in ${GENOME_DIR} and gene lists in ${GENE_LISTS_DIR}. Make sure it's there."
 fi
 
+##############################################
+# To run with sbatch on slurm queuing system #
+##############################################
+[[ ${QOS} ]] || QOS="normal"
+[[ ${PART} ]] || PART="common"
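+# Default Slurm QOS and partition; override by setting QOS and/or PART in the environment.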
+
+#cluster_opts="--cores 20 --cluster \'sbatch --mem={cluster.ram} --cpus-per-task={threads} --job-name={rule}-{wildcards} --qos=${QOS} --part=${PART} --mpi=none\' -j 300"
+#cmd="APPTAINERENV_USER=${USER} apptainer run --cleanenv -B /opt/hpc/slurm -B /var/run/munge -B /pasteur ${container} ${PROGNAME} ${pipeline_config} ${cluster_opts} --cluster-config ${cluster_config}"
+cmd="APPTAINERENV_USER=${USER} apptainer run --cleanenv -B /opt/hpc/slurm -B /var/run/munge -B /pasteur ${container} ${PROGNAME} $@"
+
 # This script can be called from various symbolic links.
 # The name of the link determines which snakefile to use.
 # PRO-seq and GRO-seq are actually the same pipeline
@@ -101,4 +112,4 @@ fi
 # so that it finds the Genome configuration and gene lists
 # that are expected to be in a specific location there.
 # singularity run -B /pasteur -B /run/shm:/run/shm ${container} ${PROGNAME} $@
-SINGULARITYENV_USER=${USER} singularity run --cleanenv -B /pasteur -B /run/shm:/run/shm ${container} ${PROGNAME} $@
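+# On the host "pisa" the container is run directly; on any other host the command is submitted to Slurm instead.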
+if [[ $(hostname) = "pisa" ]]; then SINGULARITYENV_USER=${USER} singularity run --cleanenv -B /pasteur -B /run/shm:/run/shm ${container} ${PROGNAME} $@; else sbatch --qos=${QOS} --partition=${PART} --wrap="${cmd}"; fi