diff --git a/NEWS.txt b/NEWS.txt
index 91cc4c67478e2acf1f8927ecb13141fc595bb90d..d505120819e018326d211b6e301914300b7c4284 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -1,6 +1,18 @@
 VERSION
 =======
 
+Version 4.1.1
+
+What's new in version 4.1.1?
+
+
+In this version, we added support for gzipped input files.
+We now use poetry to manage dependencies, and the dependency list was updated
+to more recent versions of the dependency packages.
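+
+For example (assuming the rest of the command-line interface is unchanged),
+gzipped input files can be passed directly:
+
+    phageterm -f reads.fastq.gz -r phage.fasta.gz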
+
+VERSION
+=======
+
 Version 4.1
 
 What's new in version 4.1?
diff --git a/PhageTerm.py b/PhageTerm.py
deleted file mode 100755
index 9325753dd09967856508a855b2abf83a9ecfd5b6..0000000000000000000000000000000000000000
--- a/PhageTerm.py
+++ /dev/null
@@ -1,336 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-##@file phageterm.py
-#
-# main program
-## PhageTerm software
-#
-#  Phageterm is a tool to determine phage termini and packaging strategy
-#  and other useful informations using raw sequencing reads.
-#  (This programs works with sequencing reads from a randomly
-#  sheared DNA library preparations as Illumina TruSeq paired-end or similar)
-#
-#  ----------------------------------------------------------------------
-#  Copyright (C) 2017 Julian Garneau
-#
-#   This program is free software; you can redistribute it and/or modify
-#   it under the terms of the GNU General Public License as published by
-#   the Free Software Foundation; either version 3 of the License, or
-#   (at your option) any later version.
-#   <http://www.gnu.org/licenses/gpl-3.0.html>
-#
-#   This program is distributed in the hope that it will be useful,
-#   but WITHOUT ANY WARRANTY; without even the implied warranty of
-#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#   GNU General Public License for more details.
-#  ----------------------------------------------------------------------
-#
-#  @author Julian Garneau <julian.garneau@usherbrooke.ca>
-#  @author Marc Monot <marc.monot@pasteur.fr>
-#  @author David Bikard <david.bikard@pasteur.fr>
-
- 
-### PYTHON Module
-# Base
-#import sys
-
-
-from __future__ import print_function
-
-# Multiprocessing
-import multiprocessing
-import os
-from multiprocessing import Manager
-
-
-# Project
-
-from _modules.utilities import checkReportTitle
-from _modules.functions_PhageTerm import *
-from _modules.common_readsCoverage_processing import processCovValuesForSeq
-from _modules.main_utils import setOptions,checkOptArgsConsistency
-
-
-### MAIN
-def main():
-
-    getopt=setOptions()
-    inRawDArgs, fParms, tParms, inDArgs=checkOptArgsConsistency(getopt)
-
-    # For each fasta in file
-    DR = {"Headful (pac)":{}, "COS (5')":{}, "COS (3')":{}, "COS":{}, "DTR (short)":{}, "DTR (long)":{}, "Mu-like":{}, "UNKNOWN":{}, "NEW":{}}
-    results_pos = 0
-    no_match = []
-    draw = 0 # used when one wants to draw some graphs.
-    chk_handler = RCCheckpoint_handler(tParms.chk_freq, tParms.dir_chk, tParms.test_mode)
-    ## VL: keep this code just in case we want to try GPU implementation again later.
-    # if tParms.gpu!=0:
-    #     ref_data = refData(inDArgs.refseq_liste, fParms.seed, inDArgs.hostseq)
-    #     nb_extracts=inRawDArgs.tot_reads
-    #     if (inRawDArgs.paired!=""):
-    #         nb_extracts_per_read=7
-    #     else:
-    #         nb_extracts_per_read=4
-    #     nb_extracts *= nb_extracts_per_read
-    #
-    #     gpu_mapping_res_dir = tParms.gpu_mapping_res_dir
-    #     wanted_gpu_nb_chunks = tParms.wanted_chunks
-    #     mapper = GPU_chunkMapper()
-    #     mapper.setRefData(ref_data)
-    #     mapper.setFicDir(gpu_mapping_res_dir)
-    #     nb_kmer_in_chunk = nb_extracts//wanted_gpu_nb_chunks
-    #     doMapping(nb_kmer_in_chunk, mapper, inRawDArgs.fastq, "", ref_data, nb_extracts_per_read)
-    #     if tParms.gpu_mapping_res_dir!=0:
-    #         exit() # Consider that if we put results in files, it is because we are processing large datasets on a cluster. Otherwise, go on working.
-    #
-    # if tParms.dir_cov_res!=None and tParms.gpu_mapping_res_dir!=None: # Process the mapping results produced by the GPU and put results in files
-    #     if tParms.idx_chunk==None or tParms.idx_seq==None:
-    #         print "Indicate index of chunk and sequence to process"
-    #         exit(1)
-    #     seq_info = seqInfo(inDArgs.refseq_liste[tParms.idx_seq],tParms.idx_seq, inDArgs.hostseq)
-    #     fname=os.path.join(tParms.gpu_mapping_res_dir,base_fname_rinfo+str(tParms.idx_chunk))
-    #     d_rinfo=load_d_rinfo(fname)
-    #     readsCoverageGPU_chunk(inRawDArgs.fastq, seq_info, tParms.idx_chunk, d_rinfo, fParms.edge, tParms.limit_coverage, fParms.virome, tParms.gpu_mapping_res_dir,
-    #                            tParms.dir_cov_res, logger=None)
-    #     exit() # Consider that if we put results in files, it is because we are processing large datasets on a cluster.
-
-    if tParms.multi_machine:
-        print("Running on cluster")
-        print(tParms.dir_cov_mm, tParms.seq_id, tParms.dir_seq_mm, tParms.DR_path)
-        if tParms.dir_cov_mm!=None and tParms.gpu_mapping_res_dir==None and tParms.dir_seq_mm==None: # perform mapping and readCoverage calculation and write results in file.
-            # In that case we are processing data in an embarrassingly parallel way on a cluster.
-            position = []
-            read_indices = list(range(int(inRawDArgs.tot_reads)))
-            part = chunks(read_indices, tParms.core)
-            for i in range(tParms.core):
-                position.append(next(part)[0])
-
-            position = position + [int(inRawDArgs.tot_reads)]
-            idx_refseq=chk_handler.getIdxSeq(tParms.core_id)
-            print("starting processing at sequence: ",idx_refseq)
-            for refseq in inDArgs.refseq_liste[idx_refseq:]:
-                readsCoverage(inRawDArgs, refseq, inDArgs, fParms,None,tParms.core_id, position[tParms.core_id], position[tParms.core_id + 1],
-                              tParms,chk_handler,idx_refseq)
-                print("Processed: ", idx_refseq, " sequences")
-                idx_refseq+=1
-            if tParms.core_id==0:
-                fname=os.path.join(tParms.dir_cov_mm,"nb_seq_processed.txt")
-                f=open(fname,"w")
-                f.write(str(idx_refseq))
-                f.close()
-            exit() # Consider that if we put results in files, it is because we are processing large datasets on a cluster.
-        if tParms.dir_cov_mm!=None and tParms.seq_id!=None and tParms.dir_seq_mm!=None and tParms.DR_path!=None:
-            from _modules.seq_processing import sum_readsCoverage_for_seq
-            # in that case, we are processing all the results of readCoverage sequence by sequence in an embarrassingly parallel way on a cluster.
-            sum_readsCoverage_for_seq(tParms.dir_cov_mm, tParms.seq_id, tParms.nb_pieces, inDArgs, fParms, inRawDArgs, tParms.dir_seq_mm,tParms.DR_path)
-            exit()
-        if tParms.dir_seq_mm!=None and tParms.dir_cov_mm==None and tParms.seq_id==None and tParms.DR_path!=None: # report generation
-            from _modules.generate_report import loadDR,genReport
-            loadDR(tParms.DR_path, DR)
-            genReport(fParms, inDArgs, inRawDArgs, no_match, DR)
-            exit()
-    else: # mono machine original multi processing mode.
-        ### COVERAGE
-        print("\nCalculating coverage values, please wait (may take a while)...\n")
-        start_run = time.time()
-
-        if not fParms.test_run and tParms.core == 1:
-            print("If your computer has more than 1 processor, you can use the -c or --core option to speed up the process.\n\n")
-
-
-        for refseq in inDArgs.refseq_liste:
-            jobs = []
-            manager = Manager()
-            return_dict = manager.dict()
-            position = []
-
-            read_indices = list(range(int(inRawDArgs.tot_reads)))
-            part = chunks(read_indices, tParms.core)
-            for i in range(tParms.core):
-                position.append(next(part)[0])
-
-            position = position + [int(inRawDArgs.tot_reads)]
-
-            for i in range(0, tParms.core):
-                tParms.core_id=i
-                process = multiprocessing.Process(target=readsCoverage, args=(inRawDArgs, refseq, inDArgs, fParms,return_dict, i,position[i], position[i+1],
-                                                                              tParms, chk_handler,results_pos))
-                jobs.append(process)
-
-            for j in jobs:
-                j.start()
-
-            for j in jobs:
-                j.join()
-
-            # merging results
-            for core_id in range(tParms.core):
-                if core_id == 0:
-                    termini_coverage       = return_dict[core_id][0]
-                    whole_coverage         = return_dict[core_id][1]
-                    paired_whole_coverage  = return_dict[core_id][2]
-                    phage_hybrid_coverage  = return_dict[core_id][3]
-                    host_hybrid_coverage   = return_dict[core_id][4]
-                    host_whole_coverage    = return_dict[core_id][5]
-                    list_hybrid            = return_dict[core_id][6]
-                    insert                 = return_dict[core_id][7].tolist()
-                    paired_missmatch       = return_dict[core_id][8]
-                    reads_tested           = return_dict[core_id][9]
-                else:
-                    termini_coverage      += return_dict[core_id][0]
-                    whole_coverage        += return_dict[core_id][1]
-                    paired_whole_coverage += return_dict[core_id][2]
-                    phage_hybrid_coverage += return_dict[core_id][3]
-                    host_hybrid_coverage  += return_dict[core_id][4]
-                    host_whole_coverage   += return_dict[core_id][5]
-                    list_hybrid           += return_dict[core_id][6]
-                    insert                += return_dict[core_id][7].tolist()
-                    paired_missmatch      += return_dict[core_id][8]
-                    reads_tested          += return_dict[core_id][9]
-
-            termini_coverage = termini_coverage.tolist()
-            whole_coverage = whole_coverage.tolist()
-            paired_whole_coverage = paired_whole_coverage.tolist()
-            phage_hybrid_coverage = phage_hybrid_coverage.tolist()
-            host_hybrid_coverage = host_hybrid_coverage.tolist()
-            host_whole_coverage = host_whole_coverage.tolist()
-            list_hybrid = list_hybrid.tolist()
-
-
-                        # Estimate fParms.virome run time
-            if fParms.virome:
-                end_run = time.time()
-                virome_run = int((end_run - start_run) * inDArgs.nbr_virome)
-                print("\n\nThe fasta file tested contains: " + str(inDArgs.nbr_virome) + " contigs (mean length: " + str(
-                    inDArgs.mean_virome) + ")")
-                print("\nA complete run takes approximatively (" + str(tParms.core) + " core used) : " + EstimateTime(
-                    virome_run) + "\n")
-                exit()
-
-            # Contigs without any match
-            if sum(termini_coverage[0]) + sum(termini_coverage[1]) == 0:
-                no_match.append((checkReportTitle(inDArgs.refseq_name[results_pos])))
-                continue
-
-            s_stats=processCovValuesForSeq(refseq,inDArgs.hostseq,inDArgs.refseq_name,inDArgs.refseq_liste,fParms.seed,inRawDArgs.analysis_name,inRawDArgs.tot_reads,\
-                                       results_pos,fParms.test_run, inRawDArgs.paired,fParms.edge,inRawDArgs.host,fParms.test, fParms.surrounding,\
-                                       fParms.limit_preferred,fParms.limit_fixed,fParms.Mu_threshold,termini_coverage,whole_coverage,\
-                                       paired_whole_coverage,phage_hybrid_coverage,host_hybrid_coverage, host_whole_coverage,insert,list_hybrid,reads_tested,DR)
-
-
-            results_pos += 1
-
-
-
-        ### EXPORT Data
-        if len(inDArgs.refseq_liste) == 1:
-            # Test No Match
-            if len(no_match) == 1:
-                print("\n\nERROR: No reads match, please check your reference file.")
-                exit()
-
-            # Text report only
-            if fParms.workflow:
-                WorkflowReport(inRawDArgs.analysis_name, s_stats.P_class, s_stats.P_left, s_stats.P_right, s_stats.P_type, s_stats.P_orient, s_stats.ave_whole_cov)
-            else:
-                # Statistics
-                ExportStatistics(inRawDArgs.analysis_name, whole_coverage, paired_whole_coverage, termini_coverage, s_stats.phage_plus_norm, s_stats.phage_minus_norm, inRawDArgs.paired, fParms.test_run)
-
-            # Sequence
-            ExportCohesiveSeq(inRawDArgs.analysis_name, s_stats.ArtcohesiveSeq, s_stats.P_seqcoh, fParms.test_run)
-            ExportPhageSequence(inRawDArgs.analysis_name, s_stats.P_left, s_stats.P_right, refseq, s_stats.P_orient, s_stats.Redundant, s_stats.Mu_like, \
-                                s_stats.P_class, s_stats.P_seqcoh, fParms.test_run)
-
-            # Report
-            # TODO: just pass s_stat as argument; it will be cleaner.
-            CreateReport(inRawDArgs.analysis_name, fParms.seed, s_stats.added_whole_coverage, draw, s_stats.Redundant, s_stats.P_left, s_stats.P_right, s_stats.Permuted, \
-                         s_stats.P_orient, s_stats.termini_coverage_norm_close, \
-                         s_stats.picMaxPlus_norm_close, s_stats.picMaxMinus_norm_close, s_stats.gen_len, inRawDArgs.tot_reads, s_stats.P_seqcoh, s_stats.phage_plus_norm, \
-                         s_stats.phage_minus_norm, s_stats.ArtPackmode, s_stats.termini, s_stats.forward, s_stats.reverse, s_stats.ArtOrient, s_stats.ArtcohesiveSeq, \
-                         s_stats.termini_coverage_close, s_stats.picMaxPlus_close, s_stats.picMaxMinus_close, \
-                         s_stats.picOUT_norm_forw, s_stats.picOUT_norm_rev, s_stats.picOUT_forw, s_stats.picOUT_rev, s_stats.lost_perc, s_stats.ave_whole_cov, \
-                         s_stats.R1, s_stats.R2, s_stats.R3, inRawDArgs.host, len(inDArgs.hostseq), host_whole_coverage, \
-                         s_stats.picMaxPlus_host, s_stats.picMaxMinus_host, fParms.surrounding, s_stats.drop_cov, inRawDArgs.paired, insert, phage_hybrid_coverage,\
-                         host_hybrid_coverage, s_stats.added_paired_whole_coverage, s_stats.Mu_like, fParms.test_run, s_stats.P_class, s_stats.P_type, s_stats.P_concat)
-
-            if (inRawDArgs.nrt==True): # non regression tests, dump phage class name into file for later checking.
-                fnrt=open("nrt.txt","w")
-                fnrt.write(s_stats.P_class)
-                fnrt.close()
-        else:
-            # Test No Match
-            if len(no_match) == inDArgs.nbr_virome:
-                print("\n\nERROR: No reads match, please check your reference file.")
-                exit()
-
-            # Report Resume
-            multiReport     = SummaryReport(inRawDArgs.analysis_name, DR, no_match)
-            multiCohSeq     = ""
-            multiPhageSeq   = ""
-            multiWorkflow   = "#analysis_name\tClass\tLeft\tPVal\tAdjPval\tRight\tPVal\tAdjPval\tType\tOrient\tCoverage\tComments\n"
-
-            # No Match in workflow
-            if fParms.workflow:
-                for no_match_contig in no_match:
-                    multiWorkflow += WorkflowReport(no_match_contig, "-", "-", "-", "-", "-", 0, 1)
-
-            for DPC in DR:
-                for DC in DR[DPC]:
-                    stat_dict = DR[DPC][DC]  # splat this in everywhere
-                    # Text report
-                    if fParms.workflow:
-                        multiWorkflow += WorkflowReport(phagename=DC, multi=1, **stat_dict)
-                    # Sequence
-                    idx_refseq=DR[DPC][DC]["idx_refseq_in_list"]
-                    refseq=inDArgs.refseq_liste[idx_refseq]
-                    multiCohSeq   += ExportCohesiveSeq(DC, stat_dict["ArtcohesiveSeq"], stat_dict["P_seqcoh"], fParms.test_run, 1)
-                    multiPhageSeq += ExportPhageSequence(DC, stat_dict["P_left"], stat_dict["P_right"], refseq, stat_dict["P_orient"], stat_dict["Redundant"], stat_dict["Mu_like"], stat_dict["P_class"], stat_dict["P_seqcoh"], fParms.test_run, 1)
-
-                    # Report
-                    multiReport = CreateReport(phagename=DC,
-                                               draw=draw,
-                                               multi=1,
-                                               multiReport=multiReport,
-                                               **stat_dict)
-
-            # Workflow
-            if not fParms.test:
-                if fParms.workflow:
-                    filoutWorkflow = open(inRawDArgs.analysis_name + "_workflow.txt", "w")
-                    filoutWorkflow.write(multiWorkflow)
-                    filoutWorkflow.close()
-
-                # Concatene Sequences
-                filoutCohSeq = open(inRawDArgs.analysis_name + "_cohesive-sequence.fasta", "w")
-                filoutCohSeq.write(multiCohSeq)
-                filoutCohSeq.close()
-
-                filoutPhageSeq = open(inRawDArgs.analysis_name + "_sequence.fasta", "w")
-                filoutPhageSeq.write(multiPhageSeq)
-                filoutPhageSeq.close()
-
-            # Concatene Report
-            doc = SimpleDocTemplate("%s_PhageTerm_report.pdf" % inRawDArgs.analysis_name, pagesize=letter, rightMargin=10,leftMargin=10, topMargin=5, bottomMargin=10)
-            doc.build(multiReport)
-
-
-        # Real virome run time
-        end_run = time.time()
-        virome_run = int(end_run-start_run)
-        print("\nThe fasta file tested contains: " + str(inDArgs.nbr_virome) + " contigs (mean length: " + str(inDArgs.mean_virome) + ")")
-        print("The run has taken (" + str(tParms.core) + " core used) : " + EstimateTime(virome_run) + "\n")
-        exit()
-
-
-
-if __name__ == '__main__':
-    main()
-
-
-
-
-
-
-
-
-
diff --git a/PhageTerm_env_3.yml b/PhageTerm_env_3.yml
deleted file mode 100644
index 43bf3f9fb8530a13deeb6b70bf09afc0d24cd5f0..0000000000000000000000000000000000000000
--- a/PhageTerm_env_3.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-name: PhageTerm_env_py3
-channels:
-  - conda-forge
-  - defaults
-dependencies:
-  - backports
-  - backports.functools_lru_cache
-  - backports_abc
-  - cycler
-  - libwebp-base
-  - lz4-c
-  - matplotlib-base
-  - matplotlib
-  - numpy
-  - openssl
-  - pandas
-  - patsy
-  - pillow
-  - pip
-  - pyparsing
-  - python=3.7
-  - python-dateutil
-  - python_abi
-  - pytz
-  - readline
-  - reportlab
-  - scikit-learn
-  - scipy
-  - setuptools
-  - singledispatch
-  - statsmodels
-  - tk
-  - tornado
-
diff --git a/README.md b/README.md
index d4d5f2291e35efa38068902d6f89d28768ac155e..967c9a88945ed779c69077006e9ee1d1fb08b423 100755
--- a/README.md
+++ b/README.md
@@ -1,17 +1,295 @@
-DESCRIPTION
-
-\-----------------------------------------------------------------------------------------------------------------------
-
-These tests aim at checking that refactoring in the code and porting it to python 3 do not induce changes in what is crucial in the final result.
-
-The file virome\_assembly\_raw.fa is the concatenation of all other .fasta files.
-
-AUTHOR
-
-\----------------------------------------------------------------------------------------------------------------------
-
-Véronique Legrand vlegrand@pasteur.fr
-
-Data for the tests were provided by Julian Garneau
-
-Explanation on what statistics should be carefully looked at was provided by Marc Monot
+# PROGRAM
+
+phageterm - run as a command line tool in a shell
+
+
+# VERSION
+
+Version 4.1.1
+Compatible with Python 3.9 up to 3.13
+
+
+# INTRODUCTION
+
+PhageTermVirome is a software tool to determine phage genome termini and the genome packaging mode for a single phage or for multiple contigs at once.
+The software uses phage or virome sequencing reads obtained from libraries prepared with randomly fragmented DNA (e.g. Covaris fragmentation
+and library preparation using Illumina TruSeq). The sequencing reads (fastq files) are aligned to the assembled phage genome or assembled
+virome (fasta or multifasta files) in order to calculate two types of coverage values: the whole genome coverage and the Starting Position Coverage (SPC). The SPC is used to perform a detailed termini and packaging mode analysis.
+
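+The following minimal Python sketch illustrates the difference between the two coverage types on a toy example (an illustration only, not the actual PhageTerm implementation, which maps seed-length read extracts):
+
+    # Toy illustration: whole genome coverage vs. Starting Position Coverage (SPC).
+    # "alignments" holds (start, end) 0-based positions of reads mapped on the genome.
+    def coverage_profiles(alignments, gen_len):
+        whole = [0] * gen_len  # number of reads covering each position
+        spc = [0] * gen_len    # number of reads *starting* at each position
+        for start, end in alignments:
+            spc[start] += 1               # one read starts here
+            for pos in range(start, min(end, gen_len)):
+                whole[pos] += 1           # this read covers position pos
+        return whole, spc
+
+A sharp SPC peak at a single position, despite smooth whole genome coverage, is the kind of signal used to detect termini.
+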
+Mu-type phage analysis can be done if the user suspects the phage genome to be Mu-like (only for single phage genome analysis, not possible with a multifasta file).
+The user can also provide the host (bacterial) genome sequence. The Mu-type analysis takes the reads that do not match the phage
+genome and aligns them to the bacterial genome using the same mapping function. The analysis to identify Mu-like phages is available only when providing a single phage genome (not possible if the user provides a multifasta file with multiple assembled phage contigs).
+
+
+The previous PhageTerm program (single phage analysis only) is still available at https://sourceforge.net/projects/phageterm/ (for versions <3.0.0)
+
+
+A Galaxy wrapper is also available for the first version of PhageTerm at https://galaxy.pasteur.fr
+(PhageTermVirome is not implemented in Galaxy yet).
+
+Since version 4.1, PhageTerm can work in two modes:
+- the usual mono machine mode (parallelization on several cores of the same machine);
+- a new multi machine mode (for advanced users), with parallelization on several machines, using intermediate files for data exchange.
+The default mode is mono machine.
+
+Versions 3.0.0 up to 4.0 work with Python 2.7.
+
+From version 4.0 to version 4.1, PhageTerm (now PhageTermVirome) works with Python 3.7.
+
+Version 4.1.1 works with Python 3.9 up to Python 3.13.
+
+
+# PREREQUISITES
+
+## For version 4.1.1
+- Python 3.9 (or higher, up to 3.13)
+- poetry (https://python-poetry.org/docs/)
+- An up-to-date list of dependencies can be found in the pyproject.toml file.
+
+# INSTALLATION
+
+- install Python 3.9 (or higher, up to 3.13) and pip
+- install poetry by typing
+
+    pip install poetry
+
+- to allow installation of the deprecated sklearn package (the use of sklearn will be replaced by scikit-learn in a future version), do
+
+    export SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL=True
+
+- download the .tar.gz file containing the latest PhageTermVirome version from https://test.pypi.org/project/phagetermvirome/#files
+and extract it. This should create a "phagetermvirome-4.1.1" directory. Go into it and type:
+    
+    poetry install
+
+- poetry has now created a virtual environment for PhageTermVirome and installed all necessary dependencies in it.
+Activate the virtual environment by typing:
+
+    poetry shell
+
+- type the following command (this is a workaround for a problem that will be fixed in the next version)
+
+    export PYTHONPATH=path/to/phagetermvirome
+
+where "path/to/phagetermvirome" is the path to the phagetermvirome subdirectory of the directory where you extracted the archive
+
+- type the following command to check that everything works well:
+
+    phageterm --help
+
+
+
+# COMMAND LINE USAGE
+
+Basic usage with mandatory options (PhageTermVirome needs at least one read file; the user can provide a second, corresponding paired-end read file if available, using the -p option).
+
+	phageterm -f reads.fastq -r phage_sequence(s).fasta
+
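+	With paired-end data, the second read file is given with the -p option (illustrative file names):
+
+	phageterm -f reads_R1.fastq -p reads_R2.fastq -r phage_sequence(s).fasta
+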
+    
+	Help:   
+    
+        phageterm -h
+        phageterm --help
+
+
+	After installation, we recommend performing a software test run, using any of the following:
+
+    -t TEST_VALUE, --test=TEST_VALUE
+                        TEST_VALUE=C5   : Test run for a 5' cohesive end (e.g. Lambda)
+                        TEST_VALUE=C3   : Test run for a 3' cohesive end (e.g. HK97)
+                        TEST_VALUE=DS   : Test run for a short Direct Terminal Repeats end (e.g. T7)
+                        TEST_VALUE=DL   : Test run for a long Direct Terminal Repeats end (e.g. T5)
+                        TEST_VALUE=H    : Test run for a Headful packaging (e.g. P1)
+                        TEST_VALUE=M    : Test run for a Mu-like packaging (e.g. Mu)
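+
+	For example, to launch the built-in test for a 5' cohesive end (assuming the bundled test data needs no further options):
+
+	phageterm -t C5
+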
+
+Non-mandatory options
+
+[-p reads_paired -c nbr_core_threads --report_title name_to_write_on_report_outputs -s seed_length -d surrounding -g host.fasta -l contig_size_limit_multi-fasta -v virome_run_time_estimation]
+
+
+Additional advanced options (only for multi-machine users)
+
+
+[--mm --dir_cov_mm path_to_coverage_results -c nb_cores --core_id idx_core -p reads_paired -s seed_length -d surrounding -l limit_multi-fasta]
+[--mm --dir_cov_mm path_to_coverage_results --dir_seq_mm path_to_sequence_results --DR_path path_to_results --seq_id index_of_sequence --nb_pieces nbr_of_read_chunks -p reads_paired -s seed_length -d surrounding -l limit_multi-fasta]
+[--mm --DR_path path_to_results --dir_seq_mm path_to_sequence_results -p reads_paired -s seed_length -d surrounding -l limit_multi-fasta]
+
+    
+
+
+   Detailed options:
+
+
+	Raw reads file in fastq format:
+    -f INPUT_FILE, --fastq=INPUT_FILE
+                        Fastq reads
+                        (NGS sequences from randomly fragmented DNA only,
+                        e.g. Illumina TruSeq)
+                        
+	Phage genome(s) in fasta format:
+    -r INPUT_FILE, --ref=INPUT_FILE
+                        Reference phage genome(s) as unique contig in fasta format
+
+
+
+    Other options common to both modes:
+
+	Raw reads file in fastq format:
+    -p INPUT_FILE, --paired=INPUT_FILE
+                        Paired fastq reads
+                        (NGS sequences from randomly fragmented DNA only,
+                        e.g. Illumina TruSeq)
+
+	Analysis name to write on output reports:
+    --report_title=REPORT_NAME
+                        Manually enter the name you want to appear on your report outputs.
+                        Used as prefix for output files.
+
+	Length of the seed used for reads in the mapping process:
+    -s SEED_LENGTH, --seed=SEED_LENGTH
+                        Manually enter the length of the seed used for reads
+                        in the mapping process (Default: 20).
+
+	Number of nucleotides around the main peak to consider for merging adjacent significant peaks (set to 1 to discover secondary termini sites):
+    -d SURROUNDING_LENGTH, --surrounding=SURROUNDING_LENGTH
+                        Manually enter the length of the surrounding region used to
+                        merge close peaks in the analysis process (Default: 20).
+
+	Host genome in fasta format (option available only for analysis of a single phage genome):
+    -g INPUT_FILE, --host=INPUT_FILE
+                        Genome of the reference host (bacterial genome) in fasta format.
+                        Warning: drastically increases processing time.
+                        This option can be used only when analyzing a single phage genome (not available for virome contigs as multifasta)
+                        
+	Define phage mean coverage:
+    -m MEAN_NBR, --mean=MEAN_NBR
+                        Phage mean coverage to use (Default: 250).        
+
+	Define minimum phage contig length:
+    -l LIMIT_FASTA, --limit=LIMIT_FASTA
+                        Minimum phage fasta length (Default: 500).
+
+
+    Options for mono machine (default) mode only
+                
+	Software run test:
+    -t TEST_VALUE, --test=TEST_VALUE
+                        TEST_VALUE=C5   : Test run for a 5' cohesive end (e.g. Lambda)
+                        TEST_VALUE=C3   : Test run for a 3' cohesive end (e.g. HK97)
+                        TEST_VALUE=DS   : Test run for a short Direct Terminal Repeats end (e.g. T7)
+                        TEST_VALUE=DL   : Test run for a long Direct Terminal Repeats end (e.g. T5)
+                        TEST_VALUE=H    : Test run for a Headful packaging (e.g. P1)
+                        TEST_VALUE=M    : Test run for a Mu-like packaging (e.g. Mu)
+
+    Number of processor cores to use:
+    -c CORE_NBR, --core=CORE_NBR
+                        Number of processor cores to use (Default: 1).
+
+
+
+    Options for multi machine mode only
+
+    Indicate that PhageTerm should run on several machines:
+    --mm
+
+
+    Options for step 1 of multi-machine mode (calculating reads coverage) on several machines
+
+    Directory for coverage results:
+    --dir_cov_mm=DIR_PATH/DIR_NAME
+                        Directory where to put coverage results.
+                        Note: it is up to the user to delete the files in this directory.
+
+    Total number of cores to use:
+    -c CORE_NBR, --core=CORE_NBR
+                        Total number of cores used across all machines.
+
+    Index of the read chunk to process on the current core:
+    --core_id=IDX
+                A number between 0 and CORE_NBR-1.
+
+    Directory for checkpoint files:
+    --dir_chk=DIR_PATH/DIR_NAME
+                    Directory where phageTerm will put its checkpoints.
+                    Note: the directory must exist before launching phageTerm.
+                    If the directory already contains a checkpoint file, phageTerm will restart from the results contained in this file.
+
+    --chk_freq=FREQUENCY
+                    The frequency in minutes at which checkpoints must be created.
+                    Note: the default value is 0, which means that no checkpoint is created.
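+
+    A hypothetical step 1 launch for core 3 of 8, with checkpoints every 30 minutes (illustrative paths; the mandatory -f/-r inputs are assumed to be required as in basic usage):
+
+        phageterm --mm -f reads.fastq -r virome.fasta --dir_cov_mm /scratch/cov -c 8 --core_id 3 --dir_chk /scratch/chk --chk_freq 30
+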
+
+
+
+    Options for step 2 of multi-machine mode (calculating per sequence statistics from reads coverage results) on several machines
+
+    Directory for coverage results:
+    --dir_cov_mm=DIR_PATH/DIR_NAME
+                        Directory where to put coverage results.
+                        Note: it is up to the user to delete the files in this directory.
+
+    Directory for per sequence results:
+    --dir_seq_mm=DIR_PATH/DIR_NAME
+                        Directory where to put the information if no match was found for one/several sequences.
+                        Note: it is up to the user to delete the files in this directory.
+
+    Directory for DR results:
+    --DR_path=DIR_PATH/DIR_NAME
+                        Directory where to put the information necessary for step 3 (final report generation).
+                        This information typically includes the names of phages found and per sequence statistics.
+                        Note: it is up to the user to delete the files in this directory.
+
+    Sequence identifier:
+    --seq_id=IDX
+            Index of the sequence to be processed by the current phageTerm process.
+            Let N be the number of sequences given at the end of step 1.
+            Then IDX is a number between 0 and N-1.
+
+    Number of pieces:
+    --nb_pieces=NP
+            Number of parts into which the reads were divided.
+            Must be the same value as given via -c at step 1 (CORE_NBR).
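+
+    A hypothetical step 2 invocation for sequence index 0 (illustrative paths, matching the step 1 example above):
+
+        phageterm --mm -f reads.fastq -r virome.fasta --dir_cov_mm /scratch/cov --dir_seq_mm /scratch/seq --DR_path /scratch/dr --seq_id 0 --nb_pieces 8
+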
+
+
+    Options for step 3 of multi-machine mode (final report generation)
+
+    Directory for DR results:
+    --DR_path=DIR_PATH/DIR_NAME
+                        Directory from which to read the information necessary for step 3 (final report generation).
+                        This information typically includes the names of phages found and per sequence statistics.
+                        Note: it is up to the user to delete the files in this directory.
+
+    Directory for per sequence results:
+    --dir_seq_mm=DIR_PATH/DIR_NAME
+                        Directory from which to get the information if no match was found for one/several sequences.
+                        Note: it is up to the user to delete the files in this directory.
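+
+    A hypothetical step 3 invocation producing the final report (illustrative paths, matching the examples above):
+
+        phageterm --mm -f reads.fastq -r virome.fasta --DR_path /scratch/dr --dir_seq_mm /scratch/seq
+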
+
+
+
+
+# OUTPUT FILES
+
+	(i) Report (.pdf)
+
+	(ii) Statistical table (.csv)
+
+	(iii) File containing the phage genome sequence re-organized to start at the predicted termini (.fasta)
+	
+
+# CONTACT
+
+Julian Garneau <julian.garneau@usherbrooke.ca> 
+
+Marc Monot <marc.monot@pasteur.fr>
+
+David Bikard <david.bikard@pasteur.fr>
+
+Véronique Legrand <vlegrand@pasteur.fr>
diff --git a/README.txt b/README.txt
deleted file mode 100644
index a4daf43e9c95289c87a3ffdd8e1c04a2ac382b83..0000000000000000000000000000000000000000
--- a/README.txt
+++ /dev/null
@@ -1,316 +0,0 @@
-PROGRAM
-=======
-
-PhageTerm.py - run as command line in a shell
-
-
-VERSION
-=======
-
-Version 4.1
-Compatible with python 3.7 and upper
-
-
-INTRODUCTION
-============
-
-PhageTermVirome software is a tool to determine phage genome termini and genome packaging mode on single phage or multiple contigs at once.
-The software uses phage and virome sequencing reads obtained from libraries prepared with DNA fragmented randomly (e.g. Covaris fragmentation,
-and library preparation using Illumina TruSeq). Phage or virome sequencing reads (fastq files) are aligned to the assembled phage genome or assembled
-virome (fasta or multifasta files) in order to  calculate two types of coverage values (whole genome coverage and the Starting Position Coverage (SPC)). The starting position coverage is used to perform a detailed termini and packaging mode analysis. 
-
-Mu-type phage analysis : can be done if user suspect the phage genome to be Mu-like type (Only for single phage genome analysis, not possible with multifasta file) :
-User can also provide the host (bacterial) genome sequence. The Mu-type phage analysis will take the reads that does not match the phage
-genome and align them on the bacterial genome using the same mapping function. The analysis to identify Mu-like phages is available only when providing a single phage genome (not possible if user provide a multi-fast file with multiple assembled phage contigs).
-
-
-The previous PhageTerm program (single phage analysis only) is still available at https://sourceforge.net/projects/phageterm/ (for versions <3.0.0)
-
-
-A Galaxy wrapper version is also available for the previous version at https://galaxy.pasteur.fr (only for the first version PhageTerm).
-PhageTermVirome is not implemented on Galaxy yet).
-
-Since version 4.1, PhageTerm can work in 2 modes:
-- the usual mono machine mode (parallelization on several cores on the same machine). 
-- a new multi machine mode (advanced users) with parallelization on several machines, using intermediate files for data exchange.
-
-The default mode is mono machine.
-Version 3.0.0 up to version 4.0 work with python 2.7
-
-Since version 4.0, PhageTerm (now PhageTermVirome) works with python 3.7
-
-
-PREREQUISITES
-=============
-
-
-For version 4.0
-
-Unix/Linux
-
-  - backports
-  - backports.functools_lru_cache
-  - backports_abc
-  - cycler
-  - libwebp-base
-  - lz4-c
-  - matplotlib-base
-  - matplotlib
-  - numpy
-  - openssl
-  - pandas
-  - patsy
-  - pillow
-  - pip
-  - pyparsing
-  - python=3.7
-  - python-dateutil
-  - python_abi
-  - pytz
-  - readline
-  - reportlab
-  - scikit-learn
-  - scipy
-  - setuptools
-  - singledispatch
-  - statsmodels
-  - tk
-  - tornado 
-
-A conda virtualenv containing python3.7 and all dependencies is provided for convenience so that users
-don't need to install anything else than miniconda or conda. (See below)
-
-
-FOR INPATIENT USERS : INSTALLING PHAGETERMVIROME USING THE CONDA VIRTUALENV (easiest option)
-============================================================================================
-
-First install miniconda if you don't have it already (you don't even need to have python 2.7 or python 3.7 installed on your machine for that since
-miniconda contains it): https://docs.conda.io/en/latest/miniconda.html
-
-Download and decompress/extract the PhageTermVirome directory available at https://gitlab.pasteur.fr/vlegrand/ptv.
-
-Then go in the PTV directory, and create the conda environment using the yml file PhageTerm_env_3.yml file for version >=4.0 (python3)
-    
-    $ conda env create -f PhageTerm_env_3.yml
-
-Then activate the environment so you can launch PhageTermVirome:
-    
-    $ conda activate PhageTerm_env_py3
-
-
-NOTE: 
-
-You can still use the old PhageTerm under python 2.7 (but no multi-fast analysis possible) using the miniconda environment from the PhageTerm_env.yml file for version<4.0 (python2). Using the following commands.
-    
-    $ conda env create -f PhageTerm_env.yml
-
-    $ conda activate PhageTerm_env
-
-
-
-COMMAND LINE USAGE
-==================
-
-Basic usage with mandatory options (PhageTermVirome needs at least one read file, but user can provide a second corresponding paired-end read file if available, using the -p option).
-
-	./PhageTerm.py -f reads.fastq -r phage_sequence(s).fasta
-
-    
-	Help:   
-    
-        ./PhageTerm.py -h
-        ./PhageTerm.py --help
-
-
-	After installation, we recommend users to perform a software run test, use any of the following:
-    	-t TEST_VALUE, --test=TEST_VALUE
-                    TEST_VALUE=C5   : Test run for a 5' cohesive end (e.g. Lambda)                        
-               			TEST_VALUE=C3   : Test run for a 3' cohesive end (e.g. HK97)
-               			TEST_VALUE=DS   : Test run for a short Direct Terminal Repeats end (e.g. T7)
-               			TEST_VALUE=DL   : Test run for a long Direct Terminal Repeats end (e.g. T5)
-               			TEST_VALUE=H    : Test run for a Headful packaging (e.g. P1)
-               			TEST_VALUE=M    : Test run for a Mu-like packaging (e.g. Mu)
-
-
-Non-mandatory options
-
-[-p reads_paired -c nbr_core_threads --report_title name_to_write_on_report_outputs -s seed_lenght -d surrounding -g host.fasta -l contig_size_limit_multi-fasta -v virome_run_time_estimation]
-
-
-Additional advanced options (only for multi-machine users)
-
-
-[--mm --dir_cov_mm path_to_coverage_results -c nb_cores --core_id idx_core -p reads_paired -s seed_lenght -d surrounding -l limit_multi-fasta]
-[--mm --dir_cov_mm path_to_coverage_results --dir_seq_mm path_to_sequence_results --DR_path path_to_results --seq_id index_of_sequence --nb_pieces nbr_of_read_chunks -p reads_paired -s seed_lenght -d surrounding -l limit_multi-fasta] [--mm --DR_path path_to_results --dir_seq_mm path_to_sequence_results -p reads_paired -s seed_lenght -d surrounding -l limit_multi-fasta]
-
-    
-
-
-   Detailed  ptions:
-
-
-	Raw reads file in fastq format:
-    -f INPUT_FILE, --fastq=INPUT_FILE
-                        Fastq reads 
-                        (NGS sequences from random fragmentation DNA only, 
-                        e.g. Illumina TruSeq)
-                        
-	Phage genome(s) in fasta format:
-    -r INPUT_FILE, --ref=INPUT_FILE
-                        Reference phage genome(s) as unique contig in fasta format
-
-
-
-    Other options common to both modes:
-
-  Raw reads file in fastq format:
-    -p INPUT_FILE, --paired=INPUT_FILE
-                        Paired fastq reads
-                        (NGS sequences from random fragmentation DNA only,
-                        e.g. Illumina TruSeq)
-
-	Analysis_name to write on output reports:
-    --report_title USER_REPORT_NAME, --report_title=REPORT_NAME
-                        Manually enter the name you want to have on your report outputs.
-                        Used as prefix for output files.
-
-	Lenght of the seed used for reads in the mapping process:
-    -s SEED_LENGHT, --seed=SEED_LENGHT
-                        Manually enter the lenght of the seed used for reads
-                        in the mapping process (Default: 20).
-
-	Number of nucleotides around the main peak to consider for merging adjacent significant peaks (set to 1 to discover secondary terminus but sites).  
-    -d SUROUNDING_LENGHT, --surrounding=SUROUNDING_LENGHT
-                        Manually enter the lenght of the surrounding used to
-                        merge close peaks in the analysis process (Default: 20).
-
-	Host genome in fasta format (option available only for analysis with a single phage genome):
-    -g INPUT_FILE, --host=INPUT_FILE
-                        Genome of reference host (bacterial genome) in fasta format
-                        Warning: increase drastically process time
-                        This option can be used only when analyzing a single phage genome (not available for virome contigs as multifasta)
-                        
-	Define phage mean coverage:
-    -m MEAN_NBR, --mean=MEAN_NBR
-                        Phage mean coverage to use (Default: 250).        
-
-	Define phage mean coverage:
-    -l LIMIT_FASTA, —limit=LIMIT_FASTA
-                        Minimum phage fasta length (Default: 500).
-
-
-    Options for mono machine (default) mode only
-                
-	Software run test:
-    -t TEST_VALUE, --test=TEST_VALUE
-                        TEST_VALUE=C5   : Test run for a 5' cohesive end (e.g. Lambda)                        
-               			    TEST_VALUE=C3   : Test run for a 3' cohesive end (e.g. HK97)
-               			    TEST_VALUE=DS   : Test run for a short Direct Terminal Repeats end (e.g. T7)
-               			    TEST_VALUE=DL   : Test run for a long Direct Terminal Repeats end (e.g. T5)
-               			    TEST_VALUE=H    : Test run for a Headful packaging (e.g. P1)
-               			    TEST_VALUE=M    : Test run for a Mu-like packaging (e.g. Mu)
-
-    Core processor number to use:
-    -c CORE_NBR, --core=CORE_NBR
-                        Number of core processor to use (Default: 1).
-
-
-
-    Options for multi machine mode only
-
-    Indicate that PhageTerm should run on several machines:
-    --mm
-
-
-    Options for step 1 of multi-machine mode (calculating reads coverage) on several machines
-
-    Directory for coverage results:
-    --dir_cov_mm=DIR_PATH/DIR_NAME
-                        Directory where to put coverage results.
-                        Note: it is up to the user to delete the files in this directory.
-
-    Total number of cores to use
-    -c CORE_NBR, --core=CORE_NBR
-                        Total number used accross over all machines.
-
-    Index of read chunk to process on current core
-    --core_id=IDX
-                A number between 0 and CORE_NBR-1
-
-    Directory for checkpoint files:
-    --dir_chk=DIR_PATH/DIR_NAME
-                    Directory where phageTerm will put its ceckpoints.
-                    Note: the directory must exist before launching phageTerm.
-                    If the directory already contains a file, phageTerm will start from the results contained in this file.
-
-    --chk_freq=FREQUENCY
-                    The frequency in minutes at which checkpoints must be created.
-                    Note: default value is 0 which means that no checkpoint is created.
-
-
-
-    Options for step 2 of multi-machine mode (calculating per sequence statistics from reads coverage results) on several machines
-
-    Directory for coverage results:
-    --dir_cov_mm=DIR_PATH/DIR_NAME
-                        Directory where to put coverage results.
-                        Note: it is up to the user to delete the files in this directory.
-
-    Directory for per sequence results
-    --dir_seq_mm=DIR_PATH/DIR_NAME
-                        Directory where to put the information if no match was found for one/several sequences.
-                        Note: it is up to the user to delete the files in this directory.
-
-    Directory for DR results
-    --DR_path=DIR_PATH/DIR_NAME
-                        Directory where to put the information necessary to step 3 (final report generation).
-                        This information typically includes names of phage found and per sequence statistics.
-                        Note: it is up to the user to delete the files in this directory.
-
-    Sequence identifier
-    --seq_id=IDX
-            Index of the sequence to be processed by the current phageTerm process.
-            Let N be the number of sequences given at the end of step 1.
-            Then IDX is  number between 0 and N-1.
-
-    Number of pieces
-    --nb_pieces=NP
-            Number of parts in which the reads were divided.
-            Must be the same value as given via -c at step 1 (CORE_NBR).
-
-
-    Options for step 3 of multi-machine mode (final report generation)
-
-    Directory for DR results
-    --DR_path=DIR_PATH/DIR_NAME
-                        Directory where to read the information necessary to step 3 (final report generation).
-                        This information typically includes names of phage found and per sequence statistics.
-                        Note: it is up to the user to delete the files in this directory.
-
-    Directory for per sequence results
-    --dir_seq_mm=DIR_PATH/DIR_NAME
-                        Directory where to get the information if no match was found for one/several sequences.
-                        Note: it is up to the user to delete the files in this directory.
-
-
-
-
-               
-                        
-OUTPUT FILES
-==========
-
-	(i) Report (.pdf)
-	
-	(ii) Statistical table (.csv) 
-
-	(iii) File containingg contains re-organized to stat at the predicted termini (.fasta)
-	
-
-CONTACT
-=======
-
-Julian Garneau <julian.garneau@usherbrooke.ca>
-Marc Monot <marc.monot@pasteur.fr>
-David Bikard <david.bikard@pasteur.fr>
-Véronique Legrand <vlegrand@pasteur.fr>
diff --git a/_modules/IData_handling.py b/_modules/IData_handling.py
deleted file mode 100755
index 5714031cab4fc1ab4404320ed925b65664a66c28..0000000000000000000000000000000000000000
--- a/_modules/IData_handling.py
+++ /dev/null
@@ -1,340 +0,0 @@
-## @file IData_handling.py
-#
-# VL: Gather here the classes and functions useful for handling input data.
-from __future__ import print_function
-
-import gzip
-from _modules.utilities import reverseComplement,changeCase
-from time import gmtime, strftime
-import datetime
-
-try:
-    import cPickle as pickle
-except ImportError:  # python 3.x
-    import pickle
-
-
-## This class encapsulates the reference sequences, the host sequence if any and all useful information about the sequences.
-#
-# It is used both for searching the read extracts in the sequences and for exploiting the results
-class refData:
-    def __init__(self,refseq_list,seed,hostseq):
-        self.refseq_list=refseq_list
-        self.seed=seed
-        self.hostseq=hostseq
-        if hostseq!="":
-            self.refseq_list.insert(0,hostseq)
-        self.nb_sequences=len(refseq_list)
-
-    def getIdxSeq(self,refseq):
-        idx=-1
-        found=False
-        for s in self.refseq_list:
-            idx += 1
-            if s==refseq:
-                found=True
-                break
-        if not found:
-            raise RuntimeError("Couldn't find sequence in list of ref sequences.")
-        return idx
-
-
-    def IdxIsHostseq(self,idx_seq):
-        if (((self.hostseq == "") and (idx_seq <= self.nb_sequences - 1)) or (
-            (self.hostseq != "") and (idx_seq >0))):
-            return False
-        return True
-
-    def getSeqSizesList(self):
-        seq_sizes_list = []
-        for seq in self.refseq_list:
-            seq_sizes_list.append(len(seq))
-        return seq_sizes_list
-
-
-## Base class for handling read extracts.
-#
-# This class should not be used directly.
-class ReadExtracts:
-    def __init__(self,seed):
-        self.seed = seed
-        self.r_extracts_list = []
-        self.nb_reads = 0
-        self.nb_extracts=0
-
-    ## Returns the list of read extracts from the loaded dataset, the number of reads and the total number of extracts
-    def getRExtracts(self):
-        return self.r_extracts_list,self.nb_reads,self.nb_extracts
-
-## Class containing all the read extracts (PE reads) that must be mapped against a sequence.
-class readExtractsPE(ReadExtracts):
-    def __init__(self,seed):
-        self.__init__(seed)
-
-
-    def addRead(self, whole_PE1,whole_PE2):
-        self.r_extracts_list.append(whole_PE1[:self.seed])
-        self.r_extracts_list.append(whole_PE1[-self.seed:])
-        self.r_extracts_list.append(whole_PE2[:self.seed])
-        self.r_extracts_list.append(reverseComplement(whole_PE2)[:self.seed])
-        self.r_extracts_list.append(reverseComplement(whole_PE2)[-self.seed:])
-        self.nb_reads += 1
-        self.nb_extracts += 5  # Number of extracts per read: 2 extracts for PE1 and 3 for PE2.
-
-
-
-## Class containing all the read extracts (single reads) that must be mapped against a sequence.
-class readsExtractsS(ReadExtracts):
-    def __init__(self,seed):
-        ReadExtracts.__init__(self,seed)
-
-    ## Adds a read to the list of extracts
-    #
-    # @param whole_read The read as extracted from the fastq file
-    # @param no_pair This paramenter s only used to make the distinction between Single and paired.
-    # Note VL: I didn't use meta programming here because I thought that it would have a negative impact on performance.
-    # TODO: test it when all the rest works.
-    def addRead(self,whole_read,no_pair=""):
-        read_part = whole_read[:self.seed]
-        self.r_extracts_list.append(read_part)
-        self.r_extracts_list.append(whole_read[-self.seed:])
-        self.r_extracts_list.append(reverseComplement(whole_read)[:self.seed])
-        self.r_extracts_list.append(reverseComplement(whole_read)[-self.seed:])
-        self.nb_reads+=1
-        self.nb_extracts += 4
-
-## use objects of this class to store read offset (PE1 and PE2) in files.
-class ReadInfo:
-    def __init__(self, off_PE1,whole_read,seed,off_PE2=None):
-        self.offset1=off_PE1
-        self.offset2=off_PE2
-        self.corlen = len(whole_read) - seed
-
-## Gets the number of reads in the fastq file
-# def getNbReads(fastq):
-#     with open(fastq) as f:
-#         for i, l in enumerate(f):
-#             pass
-#     nb_r=i+1
-#     nb_r=nb_r/4
-#     return nb_r
-
-
-
-## loads a chunk of reads for mapping on GPU.
-# Yields a ReadExtracts object plus a dictionnary of ReadInfo.
-# keeps in memory the parsing state of the file.
-# @param ch_size is in number of reads
-# @reset_ids indicates whether or not we want read index to be reset to 0 at the beginning of each chunk.
-def getChunk(fastq,seed,paired,ch_siz,reset_ids=True):
-    new_chunk = False
-    d_rinfo=dict()
-    idx_read=0
-    off2=None
-    filin = open(fastq)
-    line = filin.readline()
-    read_paired=""
-    if paired != "":
-        RE=readExtractsPE(seed)
-        filin_paired = open(paired)
-        line_paired = filin_paired.readline()
-    else:
-        RE=readsExtractsS(seed)
-
-    start = False
-    num_line=0
-    while line:
-        # Read sequence
-        read = line.split("\n")[0].split("\r")[0]
-        if paired != "":
-            read_paired = line_paired.split("\n")[0].split("\r")[0]
-        if (read[0] == '@' and num_line%4 == 0): # make sure we don't take into account a quality score instead of a read.
-            start = True
-            off1=filin.tell()
-            line = filin.readline()
-            if paired != "":
-                off2=filin_paired.tell()
-                line_paired = filin_paired.readline()
-            continue
-        if (start == True):
-            start = False
-            readlen = len(read)
-            if readlen < seed:
-                line = filin.readline()
-                if paired !="":
-                    line_paired = filin_paired.readline() # also skip PE2 in that case
-                continue
-            RE.addRead(read,read_paired)
-            d_rinfo[idx_read]=ReadInfo(off1,read,seed,off2)
-            if (idx_read>0 and ((idx_read+1)%(ch_siz)==0)):
-                yield RE,d_rinfo
-                if (reset_ids):
-                    idx_read=0
-                    new_chunk=True
-                if paired != "":
-                    RE = readExtractsPE(seed)
-                else:
-                    RE = readsExtractsS(seed)
-                d_rinfo = dict()
-            if not new_chunk:
-                idx_read+=1
-            else:
-                new_chunk=False
-
-        line = filin.readline()
-        if paired!="":
-            line_paired = filin_paired.readline()
-    filin.close()
-    if paired !="":
-        filin_paired.close()
-    yield RE, d_rinfo
-
-## dumps a dictionnary of ReadInfo objects indexed on read index.
-#
-# @param d_rinfo dictionnary to dump
-# @param fic filename (incl. full path) where to dump
-def dump_d_rinfo(d_rinfo,fic):
-    with open(fic, 'wb') as fp:
-        pickle.dump(d_rinfo, fp, protocol=pickle.HIGHEST_PROTOCOL)
-
-## Loads a dictionnary of ReadInfo objects.
-def load_d_rinfo(fic):
-    with open(fic, 'rb') as fp:
-        d_rinfo = pickle.load(fp)
-    return d_rinfo
-
-
-## loads all extracts of reads into a list for processing on GPU.
-#
-# returns 1 or 2 readExtracts objects plus a dictionnary of ReadInfo.
-def getAllReads(fastq,seed,paired):
-    d_rinfo=dict()
-    idx_read=0
-    off2=None
-    filin = open(fastq)
-    line = filin.readline()
-    read_paired=""
-
-    if paired != "":
-        RE=readExtractsPE(seed)
-        filin_paired = open(paired)
-        line_paired = filin_paired.readline()
-    else:
-        RE=readsExtractsS(seed)
-
-    start = False
-    num_line=0
-    while line:
-        # Read sequence
-        read = line.split("\n")[0].split("\r")[0]
-        if paired != "":
-            read_paired = line_paired.split("\n")[0].split("\r")[0]
-        if (read[0] == '@' and num_line%4 == 0): # make sure we don't take into account a quality score instead of a read.
-            start = True
-            off1=filin.tell()
-            line = filin.readline()
-            if paired != "":
-                off2=filin_paired.tell()
-                line_paired = filin_paired.readline()
-            continue
-        if (start == True):
-            start = False
-            readlen = len(read)
-            if readlen < seed:
-                line = filin.readline()
-                if paired !="":
-                    line_paired = filin_paired.readline() # also skip PE2 in that case
-                continue
-            RE.addRead(read,read_paired)
-            d_rinfo[idx_read]=ReadInfo(off1,read,seed,off2)
-            idx_read+=1
-
-        line = filin.readline()
-        if paired!="":
-            line_paired = filin_paired.readline()
-    filin.close()
-    if paired !="":
-        filin_paired.close()
-    return RE,d_rinfo
-
-## use this class to retrieve reads from fastq file.
-class ReadGetter:
-    ## constructor
-    #
-    # @param fastq Name of the fastq file that contains the read
-    # @param d_rinfo A dictionnary of ReadInfo objects that contains the offset indicating where the read starts in the file.
-    # @param paired The name of the file containing the PE2 (defaults to "").
-    def __init__(self,fastq,d_rinfo,paired=""):
-        self.filin=open(fastq)
-        self.d_rinfo=d_rinfo
-        self.paired=paired
-        if paired!="":
-            self.filinp=open(fastq)
-
-    def getOneRead(self,idx_read):
-        read_paired=""
-        self.filin.seek(self.d_rinfo[idx_read].offset1)
-        read=self.filin.readline()
-        if self.paired!="":
-            self.filinp.seek(self.d_rinfo[idx_read].offset2)
-            read_paired = self.filinp.readline()
-        return read,read_paired
-
-
-### READS Functions
-def totReads(filin):
-    """Verify and retrieve the number of reads in the fastq file before alignment"""
-    if filin.endswith('.gz'):
-        filein = gzip.open(filin, 'rb')
-    else:
-        filein = open(filin, 'r')
-
-    line = 0
-    while filein.readline():
-        line += 1
-    seq = float(round(line / 4))
-    filein.close()
-    return seq
-
-### SEQUENCE parsing function
-def genomeFastaRecovery(filin, limit_reference, edge, host_test = 0):
-    """Get genome sequence from fasta file"""
-    print("recovering genome from: ",filin)
-    print(strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
-    if filin == "":
-        return "", "", ""
-
-    #infile = open(filin, 'r')
-    infile = gzip.open(filin, "rt") if filin.endswith(".gz") else open(filin, 'r')
-    name = []
-    genome = []
-    genome_line = ""
-    genome_rejected = 0
-    for line in infile:
-        if line[0] == ">":
-            if name != []:
-                if len(genome_line) >= limit_reference:
-                    genome.append(genome_line[-edge:] + genome_line + genome_line[:edge])
-                else:
-                    genome_rejected += 1
-                    name = name[:-1]
-                genome_line = ""
-            name.append(line[1:].split('\r')[0].split('\n')[0])
-        else:
-            genome_line += changeCase(line).replace(' ', '').split('\r')[0].split('\n')[0]
-
-    if len(genome_line) >= limit_reference:
-        genome.append(genome_line[-edge:] + genome_line + genome_line[:edge])
-        genome_line = ""
-    else:
-        genome_rejected += 1
-        name = name[:-1]
-
-    infile.close()
-
-    if host_test:
-        return "".join(genome)
-    else:
-        return genome, name, genome_rejected
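-
-# Example (sketch): with edge=2 and a record "AAACCC" long enough to pass
-# limit_reference, the stored sequence is "CC" + "AAACCC" + "AA": each genome
-# is padded with wrap-around copies of its own ends so matches spanning the
-# origin are still counted; RemoveEdge() strips this padding later on.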
-
diff --git a/_modules/__init__.py b/_modules/__init__.py
deleted file mode 100644
index fc80254b619d488138a43632b617124a3d324702..0000000000000000000000000000000000000000
--- a/_modules/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-pass
\ No newline at end of file
diff --git a/_modules/common_readsCoverage_processing.py b/_modules/common_readsCoverage_processing.py
deleted file mode 100644
index dd37d7fbc2ed7cb3e82d1dd86ca24468677a6463..0000000000000000000000000000000000000000
--- a/_modules/common_readsCoverage_processing.py
+++ /dev/null
@@ -1,695 +0,0 @@
-## @file common_readsCoverage_processing.py
-#
-# VL: here I gathered functions that are common to both GPU and mono/multi CPU versions.
-# These functions are called after the mapping is done and all the counters are filled from mapping output results.
-from __future__ import print_function
-
-from time import gmtime, strftime
-import heapq
-import itertools
-
-import numpy as np
-import pandas as pd
-# Statistics
-from scipy import stats
-from statsmodels.sandbox.stats.multicomp import multipletests
-from sklearn.tree import DecisionTreeRegressor #TODO VL: fix issue on importing that
-
-from _modules.utilities import checkReportTitle
-from _modules.SeqStats import SeqStats
-
-import os
-
-
-k_no_match_for_contig=1
-
-def wholeCov(whole_coverage, gen_len):
-    """Calculate the coverage for whole read alignments and its average"""
-    if gen_len == 0:
-        return whole_coverage, 1
-    total_cov = sum(whole_coverage[0]) + sum(whole_coverage[1])
-    ave_whole_cov = float(total_cov) / (2 * float(gen_len))
-    added_whole_coverage = [x + y for x, y in zip(whole_coverage[0], whole_coverage[1])]
-    return added_whole_coverage, ave_whole_cov
-
-def testwholeCov(added_whole_coverage, ave_whole_cov, test):
-    """Return information about whole coverage."""
-    if test:
-        return ""
-    if ave_whole_cov < 50:
-        print("\nWARNING: average coverage is under the limit of the software (50)")
-    elif ave_whole_cov < 200:
-        print("\nWARNING: average coverage is low (<200), Li's method is presumably unreliable\n")
-    drop_cov = []
-    start_pos = last_pos = count_pos = 0
-    for pos in range(len(added_whole_coverage)):
-        if added_whole_coverage[pos] < (ave_whole_cov / 1.5):
-            if pos == last_pos+1:
-                count_pos += 1
-                last_pos = pos
-            else:
-                if count_pos > 100:
-                    drop_cov.append( (start_pos,last_pos+1) )
-                last_pos = start_pos = pos
-                count_pos = 0
-            last_pos = pos
-    return drop_cov
-
-def maxPaired(paired_whole_coverage, whole_coverage):
-    """Max paired coverage using whole coverage, counter edge effect with paired-ends."""
-    pwc = paired_whole_coverage[:]
-    wc = whole_coverage[:]
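-    # NB: "[:]" makes a shallow copy, so pwc[i] and wc[i] are still the
-    # caller's inner lists; the maxima below are written into them in place.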
-    for i in range(len(pwc)):
-        for j in range(len(pwc[i])):
-            if pwc[i][j] < wc[i][j]:
-                pwc[i][j] = wc[i][j]
-    return pwc
-
-def replaceNormMean(norm_cov):
-    """Replace the values not normalised due to covLimit by mean."""
-    nc_sum = nc_count = 0
-    for nc in norm_cov:
-        if nc > 0:
-            nc_sum += nc
-            nc_count += 1
-    if nc_count == 0:
-        mean_nc = 0
-    else:
-        mean_nc = nc_sum / float(nc_count)
-    for i in range(len(norm_cov)):
-        if norm_cov[i] == 0:
-            norm_cov[i] = mean_nc
-    return norm_cov, mean_nc
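-
-# Example (sketch): replaceNormMean([0, 0.2, 0.4]) returns roughly
-# ([0.3, 0.2, 0.4], 0.3): positions left at 0 (skipped during normalisation
-# because coverage was under covLimit) are filled with the mean of the rest.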
-
-def normCov(termini_coverage, whole_coverage, covLimit, edge):
-    """Return the termini_coverage normalised by the whole coverage (% of coverage due to first base)."""
-    normalised_coverage = [len(termini_coverage[0])*[0], len(termini_coverage[0])*[0]]
-    termini_len = len(termini_coverage[0])
-    mean_nc = [1,1]
-    for i in range(len(termini_coverage)):
-        for j in range(len(termini_coverage[i])):
-            if j < edge or j > termini_len-edge:
-                continue
-            if whole_coverage[i][j] >= covLimit:
-                if float(whole_coverage[i][j]) != 0:
-                    normalised_coverage[i][j] = float(termini_coverage[i][j]) / float(whole_coverage[i][j])
-                else:
-                    normalised_coverage[i][j] = 0
-            else:
-                normalised_coverage[i][j] = 0
-        normalised_coverage[i], mean_nc[i] = replaceNormMean(normalised_coverage[i])
-    return normalised_coverage, mean_nc
-
-def RemoveEdge(tableau, edge):
-    return tableau[edge:-edge]
-
-def usedReads(coverage, tot_reads):
-    """Retrieve the number of reads after alignment and calculate the percentage of reads lost."""
-    used_reads = sum(coverage[0]) + sum(coverage[1])
-    lost_reads = tot_reads - used_reads
-    lost_perc = (float(tot_reads) - float(used_reads))/float(tot_reads) * 100
-    return used_reads, lost_reads, lost_perc
-
-### PEAK functions
-def picMax(coverage, nbr_pic):
-    """COORDINATES (coverage value, position) of the nbr_pic largest coverage value."""
-    if coverage == [[],[]] or coverage == []:
-        return "", "", ""
-    picMaxPlus = heapq.nlargest(nbr_pic, zip(coverage[0], itertools.count()))
-    picMaxMinus = heapq.nlargest(nbr_pic, zip(coverage[1], itertools.count()))
-    TopFreqH = max(max(np.array(list(zip(*picMaxPlus))[0])), max(np.array(list(zip(*picMaxMinus))[0])))
-    return picMaxPlus, picMaxMinus, TopFreqH
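-
-# Worked example (sketch, toy values):
-#   picMax([[0, 5, 2], [1, 0, 7]], 2) -> ([(5, 1), (2, 2)], [(7, 2), (1, 0)], 7)
-# Each peak is a (coverage, position) pair; TopFreqH is the highest coverage
-# seen on either strand.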
-
-def RemoveClosePicMax(picMax, gen_len, nbr_base):
-    """Remove peaks that are too close of the maximum (nbr_base around)"""
-    if nbr_base == 0:
-        return picMax[1:], [picMax[0]]
-    picMaxRC = picMax[:]
-    posMax = picMaxRC[0][1]
-    LimSup = posMax + nbr_base
-    LimInf = posMax - nbr_base
-    if LimSup < gen_len and LimInf >= 0:
-        PosOut = list(range(LimInf,LimSup))
-    elif LimSup >= gen_len:
-        TurnSup = LimSup - gen_len
-        PosOut = list(range(posMax,gen_len))+list(range(0,TurnSup)) + list(range(LimInf,posMax))
-    elif LimInf < 0:
-        TurnInf = gen_len + LimInf
-        PosOut = list(range(0,posMax))+list(range(TurnInf,gen_len)) + list(range(posMax,LimSup))
-    picMaxOK = []
-    picOUT = []
-    for peaks in picMaxRC:
-        if peaks[1] not in PosOut:
-            picMaxOK.append(peaks)
-        else:
-            picOUT.append(peaks)
-    return picMaxOK, picOUT
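-
-# Worked example (sketch): with picMax=[(50, 100), (30, 101), (20, 500)],
-# gen_len=1000 and nbr_base=2, positions 98..101 are masked, so the close
-# peak at 101 is filtered out along with the maximum itself:
-#   picMaxOK=[(20, 500)], picOUT=[(50, 100), (30, 101)]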
-
-def addClosePic(picList, picClose, norm = 0):
-    """Add coverage value of close peaks to the top peak. Remove picClose in picList if exist."""
-    if norm:
-        if picClose[0][0] >= 0.5:
-            return picList, [picClose[0]]
-    picListOK = picList[:]
-    cov_add = 0
-    for cov in picClose:
-        cov_add += cov[0]
-        picListOK[cov[1]] = 0.01
-    picListOK[picClose[0][1]] = cov_add
-    return picListOK, picClose
-
-def remove_pics(arr,n):
-    """Remove the n highest values from the array."""
-    arr=np.array(arr)
-    pic_pos=arr.argsort()[-n:][::-1]
-    arr2=np.delete(arr,pic_pos)
-    return arr2
-
-def gamma(X):
-    """Apply a gamma distribution."""
-    X = np.array(X, dtype=np.int64)
-    v = remove_pics(X, 3)
-
-    dist_max = float(max(v))
-    if dist_max == 0:
-        return np.array([1.00] * len(X))
-
-    actual = np.bincount(v)
-    fit_alpha, fit_loc, fit_beta = stats.gamma.fit(v)
-    expected = stats.gamma.pdf(np.arange(0, dist_max + 1, 1), fit_alpha, loc=fit_loc, scale=fit_beta) * sum(actual)
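-    # NB: 'actual' and 'expected' do not affect the value returned below;
-    # they look like leftovers from an earlier goodness-of-fit check.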
-
-    return stats.gamma.pdf(X, fit_alpha, loc=fit_loc, scale=fit_beta)
-
-
-# STATISTICS
-def test_pics_decision_tree(whole_coverage, termini_coverage, termini_coverage_norm, termini_coverage_norm_close):
-    """Fits a gamma distribution using a decision tree."""
-    L = len(whole_coverage[0])
-    res = pd.DataFrame({"Position": np.array(range(L)) + 1, "termini_plus": termini_coverage[0],
-                        "SPC_norm_plus": termini_coverage_norm[0], "SPC_norm_minus": termini_coverage_norm[1],
-                        "SPC_norm_plus_close": termini_coverage_norm_close[0],
-                        "SPC_norm_minus_close": termini_coverage_norm_close[1], "termini_minus": termini_coverage[1],
-                        "cov_plus": whole_coverage[0], "cov_minus": whole_coverage[1]})
-
-    res["cov"] = res["cov_plus"].values + res["cov_minus"].values
-
-    res["R_plus"] = list(map(float, termini_coverage[0])) // np.mean(termini_coverage[0])
-    res["R_minus"] = list(map(float, termini_coverage[1])) // np.mean(termini_coverage[1])
-
-    regr = DecisionTreeRegressor(max_depth=3, min_samples_leaf=100)
-    X = np.arange(L)
-    X = X[:, np.newaxis]
-    y = res["cov"].values
-    regr.fit(X, y)
-
-    # Predict
-    y_1 = regr.predict(X)
-    res["covnode"] = y_1
-    covnodes = np.unique(y_1)
-    thres = np.mean(whole_coverage[0]) / 2
-    covnodes = [n for n in covnodes if n > thres]
-
-    for node in covnodes:
-        X = res[res["covnode"] == node]["termini_plus"].values
-        res.loc[res["covnode"] == node, "pval_plus"] = gamma(X)
-        X = res[res["covnode"] == node]["termini_minus"].values
-        res.loc[res["covnode"] == node, "pval_minus"] = gamma(X)
-
-    res.loc[res.pval_plus > 1, 'pval_plus'] = 1.00
-    res.loc[res.pval_minus > 1, 'pval_minus'] = 1.00
-    res = res.fillna(1.00)
-
-    res['pval_plus_adj'] = multipletests(res["pval_plus"].values, alpha=0.01, method="bonferroni")[1]
-    res['pval_minus_adj'] = multipletests(res["pval_minus"].values, alpha=0.01, method="bonferroni")[1]
-
-    res = res.fillna(1.00)
-
-    res_plus = pd.DataFrame(
-        {"Position": res['Position'], "SPC_std": res['SPC_norm_plus'] * 100, "SPC": res['SPC_norm_plus_close'] * 100,
-         "pval_gamma": res['pval_plus'], "pval_gamma_adj": res['pval_plus_adj']})
-    res_minus = pd.DataFrame(
-        {"Position": res['Position'], "SPC_std": res['SPC_norm_minus'] * 100, "SPC": res['SPC_norm_minus_close'] * 100,
-         "pval_gamma": res['pval_minus'], "pval_gamma_adj": res['pval_minus_adj']})
-
-    res_plus.sort_values("SPC", ascending=False, inplace=True)
-    res_minus.sort_values("SPC", ascending=False, inplace=True)
-
-    res_plus.reset_index(drop=True, inplace=True)
-    res_minus.reset_index(drop=True, inplace=True)
-
-    return res, res_plus, res_minus
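-
-# Sketch of the segmentation idea above: each leaf of the depth-3 regression
-# tree is a genome interval of roughly constant coverage ("covnode"), and
-# gamma() is fitted separately inside each interval, so peak p-values are
-# computed against the local coverage level rather than the global one.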
-
-### SCORING functions
-# Li's methodology
-def ratioR1(TopFreqH, used_reads, gen_len):
-    """Calculate the ratio H/A (R1) = highest frequency/average frequency. For Li's methodology."""
-    AveFreq = (float(used_reads)/float(gen_len)/2)
-    if AveFreq == 0:
-        return 0, 0
-    R1 = float(TopFreqH)/float(AveFreq)
-    return R1, AveFreq
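-
-# Worked example (sketch): with used_reads=100000 and gen_len=50000,
-# AveFreq = 100000/50000/2 = 1.0; a top peak of TopFreqH=150 then gives
-# R1 = 150, well above the "Fixed" termini threshold of 100 used below.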
-
-def ratioR(picMax):
-    """Calculate the T1/T2 = Top 1st frequency/Second higher frequency. For Li's methodology."""
-    T1 = picMax[0][0]
-    T2 = max(1,picMax[1][0])
-    R = float(T1)/float(T2)
-    return round(R)
-
-
-def packMode(R1, R2, R3):
-    """Make the prognosis about the phage packaging mode and termini type. For Li's methodology."""
-    packmode = "OTHER"
-    termini = ""
-    forward = ""
-    reverse = ""
-
-    if R1 < 30:
-        termini = "Absence"
-        if R2 < 3:
-            forward = "No Obvious Termini"
-        if R3 < 3:
-            reverse = "No Obvious Termini"
-    elif R1 > 100:
-        termini = "Fixed"
-        if R2 < 3:
-            forward = "Multiple-Pref. Term."
-        if R3 < 3:
-            reverse = "Multiple-Pref. Term."
-    else:
-        termini = "Preferred"
-        if R2 < 3:
-            forward = "Multiple-Pref. Term."
-        if R3 < 3:
-            reverse = "Multiple-Pref. Term."
-
-    if R2 >= 3:
-        forward = "Obvious Termini"
-    if R3 >= 3:
-        reverse = "Obvious Termini"
-
-    if R2 >= 3 and R3 >= 3:
-        packmode = "COS"
-    if R2 >= 3 and R3 < 3:
-        packmode = "PAC"
-    if R2 < 3 and R3 >= 3:
-        packmode = "PAC"
-    return packmode, termini, forward, reverse
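-
-# Worked example (sketch): packMode(150, 5, 1) returns
-# ("PAC", "Fixed", "Obvious Termini", "Multiple-Pref. Term."):
-# R1 > 100 means fixed termini, R2 >= 3 an obvious forward terminus, and the
-# strand asymmetry (R2 >= 3, R3 < 3) points to headful (PAC) packaging.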
-
-### PHAGE Information
-def orientation(picMaxPlus, picMaxMinus):
-    """Return phage termini orientation."""
-    if not picMaxPlus and not picMaxMinus:
-        return "NA"
-    if picMaxPlus and not picMaxMinus:
-        return "Forward"
-    if not picMaxPlus and picMaxMinus:
-        return "Reverse"
-    if picMaxPlus and picMaxMinus:
-        if picMaxPlus[0][0] > picMaxMinus[0][0]:
-            return "Forward"
-        elif picMaxMinus[0][0] > picMaxPlus[0][0]:
-            return "Reverse"
-        elif picMaxMinus[0][0] == picMaxPlus[0][0]:
-            return "NA"
-
-
-def typeCOS(PosPlus, PosMinus, nbr_lim):
-    """ Return type of COS sequence."""
-    if PosPlus < PosMinus and abs(PosPlus-PosMinus) < nbr_lim:
-        return "COS (5')", "Lambda"
-    else:
-        return "COS (3')", "HK97"
-
-def sequenceCohesive(Packmode, refseq, picMaxPlus, picMaxMinus, nbr_lim):
-    """Return cohesive sequence for COS phages."""
-    if Packmode != 'COS':
-        return '', Packmode
-    PosPlus = picMaxPlus[0][1]
-    PosMinus = picMaxMinus[0][1]
-
-    SC_class, SC_type = typeCOS(PosPlus, PosMinus, nbr_lim)
-
-    if SC_class == "COS (5')":
-        if abs(PosMinus - PosPlus) < nbr_lim:
-            seqcoh = refseq[min(PosPlus, PosMinus):max(PosPlus, PosMinus) + 1]
-            return seqcoh, Packmode
-        else:
-            seqcoh = refseq[max(PosPlus, PosMinus) + 1:] + refseq[:min(PosPlus, PosMinus)]
-            return seqcoh, Packmode
-
-    elif SC_class == "COS (3')":
-        if abs(PosMinus - PosPlus) < nbr_lim:
-            seqcoh = refseq[min(PosPlus, PosMinus) + 1:max(PosPlus, PosMinus)]
-            return seqcoh, Packmode
-        else:
-            seqcoh = refseq[max(PosPlus, PosMinus) + 1:] + refseq[:min(PosPlus, PosMinus)]
-            return seqcoh, Packmode
-    else:
-        return '', Packmode
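-
-# Example (sketch): for a COS (5') phage with PosPlus=100 and PosMinus=110
-# (closer than nbr_lim), the cohesive overhang is refseq[100:111], i.e. the
-# sequence between the two termini, inclusive.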
-
-def selectSignificant(table, pvalue, limit):
-    """Return significant peaks over a limit"""
-    table_pvalue = table.loc[lambda df: df.pval_gamma_adj < pvalue, :]
-    table_pvalue_limit = table_pvalue.loc[lambda df: df.SPC > limit, :]
-    table_pvalue_limit.reset_index(drop=True, inplace=True)
-    return table_pvalue_limit
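-
-# Example (sketch): selectSignificant(phage_plus_norm, 1.0 / gen_len, limit)
-# keeps the peaks whose Bonferroni-adjusted gamma p-value is below 1/gen_len
-# and whose SPC value exceeds the given limit.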
-
-def testMu(paired, list_hybrid, gen_len, used_reads, seed, insert, phage_hybrid_coverage, Mu_threshold, hostseq):
-    """Return Mu if enough hybrid reads compared to theory."""
-    if hostseq == "":
-        return 0, -1, -1, ""
-    if paired != "" and len(insert) != 0:
-        insert_mean    = sum(insert) / len(insert)
-    else:
-        insert_mean    = max(100, seed+10)
-    Mu_limit       = ((insert_mean - seed) / float(gen_len)) * used_reads/2
-    test           = 0
-    Mu_term_plus   = "Random"
-    Mu_term_minus  = "Random"
-    picMaxPlus_Mu, picMaxMinus_Mu, TopFreqH_phage_hybrid = picMax(phage_hybrid_coverage, 1)
-    picMaxPlus_Mu  = picMaxPlus_Mu[0][1]
-    picMaxMinus_Mu = picMaxMinus_Mu[0][1]
-
-    # Orientation
-    if list_hybrid[0] > list_hybrid[1]:
-        P_orient = "Forward"
-    elif list_hybrid[1] > list_hybrid[0]:
-        P_orient = "Reverse"
-    else:
-        P_orient = ""
-
-    # Termini
-    if list_hybrid[0] > ( Mu_limit * Mu_threshold ):
-        test = 1
-        pos_to_check = list(range(picMaxPlus_Mu+1,gen_len)) + list(range(0,100))
-        for pos in pos_to_check:
-            if phage_hybrid_coverage[0][pos] >= max(1,phage_hybrid_coverage[0][picMaxPlus_Mu]/4):
-                Mu_term_plus = pos
-                picMaxPlus_Mu = pos
-            else:
-                Mu_term_plus = pos
-                break
-    # Reverse
-    if list_hybrid[1] > ( Mu_limit * Mu_threshold ):
-        test = 1
-        pos_to_check = list(range(0,picMaxMinus_Mu))[::-1] + list(range(gen_len-100,gen_len))[::-1]
-        for pos in pos_to_check:
-            if phage_hybrid_coverage[1][pos] >= max(1,phage_hybrid_coverage[1][picMaxMinus_Mu]/4):
-                Mu_term_minus = pos
-                picMaxMinus_Mu = pos
-            else:
-                Mu_term_minus = pos
-                break
-    return test, Mu_term_plus, Mu_term_minus, P_orient
-
-### DECISION Process
-def decisionProcess(plus_significant, minus_significant, limit_fixed, gen_len, paired, insert, R1, list_hybrid,
-                    used_reads, seed, phage_hybrid_coverage, Mu_threshold, refseq, hostseq):
-    """ ."""
-    P_orient = "NA"
-    P_seqcoh = ""
-    P_concat = ""
-    P_type = "-"
-    Mu_like = 0
-    P_left = "Random"
-    P_right = "Random"
-    # 2 peaks sig.
-    if not plus_significant.empty and not minus_significant.empty:
-        # Multiple
-        if (len(plus_significant["SPC"]) > 1 or len(minus_significant["SPC"]) > 1):
-            if not (plus_significant["SPC"][0] > limit_fixed or minus_significant["SPC"][0] > limit_fixed):
-                Redundant = 1
-                P_left = "Multiple"
-                P_right = "Multiple"
-                Permuted = "Yes"
-                P_class = "-"
-                P_type = "-"
-                return Redundant, Permuted, P_class, P_type, P_seqcoh, P_concat, P_orient, P_left, P_right, Mu_like
-
-        dist_peak = abs(plus_significant['Position'][0] - minus_significant['Position'][0])
-        dist_peak_over = abs(abs(plus_significant['Position'][0] - minus_significant['Position'][0]) - gen_len)
-        P_left = plus_significant['Position'][0]
-        P_right = minus_significant['Position'][0]
-        # COS
-        if (dist_peak <= 2) or (dist_peak_over <= 2):
-            Redundant = 0
-            Permuted = "No"
-            P_class = "COS"
-            P_type = "-"
-        elif (dist_peak < 20 and dist_peak > 2) or (dist_peak_over < 20 and dist_peak_over > 2):
-            Redundant = 0
-            Permuted = "No"
-            P_class, P_type = typeCOS(plus_significant["Position"][0], minus_significant["Position"][0], gen_len / 2)
-            P_seqcoh, packstat = sequenceCohesive("COS", refseq, [
-                ((plus_significant["SPC"][0]), (plus_significant["Position"][0]) - 1)], [((minus_significant["SPC"][0]),
-                                                                                          (
-                                                                                          minus_significant["Position"][
-                                                                                              0]) - 1)], gen_len / 2)
-        # DTR
-        elif (dist_peak <= 1000) or (dist_peak_over <= 1000):
-            Redundant = 1
-            Permuted = "No"
-            P_class = "DTR (short)"
-            P_type = "T7"
-            P_seqcoh, packstat = sequenceCohesive("COS", refseq, [
-                ((plus_significant["SPC"][0]), (plus_significant["Position"][0]) - 1)], [((minus_significant["SPC"][0]),
-                                                                                          (
-                                                                                          minus_significant["Position"][
-                                                                                              0]) - 1)], gen_len / 2)
-        elif (dist_peak <= 0.1 * gen_len) or (dist_peak_over <= 0.1 * gen_len):
-            Redundant = 1
-            Permuted = "No"
-            P_class = "DTR (long)"
-            P_type = "T5"
-            P_seqcoh, packstat = sequenceCohesive("COS", refseq, [
-                ((plus_significant["SPC"][0]), (plus_significant["Position"][0]) - 1)], [((minus_significant["SPC"][0]),
-                                                                                          (
-                                                                                          minus_significant["Position"][
-                                                                                              0]) - 1)], gen_len / 2)
-        else:
-            Redundant = 1
-            Permuted = "No"
-            P_class = "-"
-            P_type = "-"
-    # 1 peak sig.
-    elif (not plus_significant.empty and minus_significant.empty) or (plus_significant.empty and not minus_significant.empty):
-        Redundant = 1
-        Permuted = "Yes"
-        P_class = "Headful (pac)"
-        P_type = "P1"
-        if paired != "":
-            if R1 == 0 or len(insert) == 0:
-                P_concat = 1
-            else:
-                P_concat = round((sum(insert) / len(insert)) / R1) - 1
-        if not plus_significant.empty:
-            P_left = plus_significant['Position'][0]
-            P_right = "Distributed"
-            P_orient = "Forward"
-        else:
-            P_left = "Distributed"
-            P_right = minus_significant['Position'][0]
-            P_orient = "Reverse"
-    # 0 peak sig.
-    elif plus_significant.empty and minus_significant.empty:
-        Mu_like, Mu_term_plus, Mu_term_minus, P_orient = testMu(paired, list_hybrid, gen_len, used_reads, seed, insert,
-                                                                phage_hybrid_coverage, Mu_threshold, hostseq)
-        if Mu_like:
-            Redundant = 0
-            Permuted = "No"
-            P_class = "Mu-like"
-            P_type = "Mu"
-            P_left = Mu_term_plus
-            P_right = Mu_term_minus
-        else:
-            Redundant = 1
-            Permuted = "Yes"
-            P_class = "-"
-            P_type = "-"
-
-    return Redundant, Permuted, P_class, P_type, P_seqcoh, P_concat, P_orient, P_left, P_right, Mu_like
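-
-# Decision summary (sketch of the logic above): more than one significant
-# peak with none above limit_fixed -> "Multiple" permuted termini; two
-# significant peaks within 2 bp -> COS; within 20 bp -> COS (5') or (3');
-# within 1 kb -> DTR (short); within 10% of the genome -> DTR (long); a
-# single significant peak -> Headful (pac); none -> Mu-like test, else "-".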
-
-# Processes coverage values for a sequence.
-def processCovValuesForSeq(refseq,hostseq,refseq_name,refseq_liste,seed,analysis_name,tot_reads,results_pos,test_run, paired,edge,host,test, surrounding,limit_preferred,limit_fixed,Mu_threshold,\
-                           termini_coverage,whole_coverage,paired_whole_coverage,phage_hybrid_coverage,host_hybrid_coverage, host_whole_coverage,insert,list_hybrid,reads_tested,DR,DR_path=None):
-
-    print("\n\nFinished calculating coverage values, the remainder should be completed rapidly\n",
-    strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
-
-    # WHOLE Coverage : Average, Maximum and Minimum
-    added_whole_coverage, ave_whole_cov = wholeCov(whole_coverage, len(refseq))
-    added_paired_whole_coverage, ave_paired_whole_cov = wholeCov(paired_whole_coverage, len(refseq))
-    added_host_whole_coverage, ave_host_whole_cov = wholeCov(host_whole_coverage, len(hostseq))
-
-    drop_cov = testwholeCov(added_whole_coverage, ave_whole_cov, test_run)
-
-    # NORM pic by whole coverage (1 base)
-    if paired != "":
-        #paired_whole_coverage_test = maxPaired(paired_whole_coverage, whole_coverage)
-        termini_coverage_norm, mean_nc = normCov(termini_coverage, paired_whole_coverage, max(10, ave_whole_cov / 1.5),
-                                                 edge)
-    else:
-        termini_coverage_norm, mean_nc = normCov(termini_coverage, whole_coverage, max(10, ave_whole_cov / 1.5), edge)
-
-    # REMOVE edge
-    termini_coverage[0] = RemoveEdge(termini_coverage[0], edge)
-    termini_coverage[1] = RemoveEdge(termini_coverage[1], edge)
-    termini_coverage_norm[0] = RemoveEdge(termini_coverage_norm[0], edge)
-    termini_coverage_norm[1] = RemoveEdge(termini_coverage_norm[1], edge)
-    whole_coverage[0] = RemoveEdge(whole_coverage[0], edge)
-    whole_coverage[1] = RemoveEdge(whole_coverage[1], edge)
-    paired_whole_coverage[0] = RemoveEdge(paired_whole_coverage[0], edge)
-    paired_whole_coverage[1] = RemoveEdge(paired_whole_coverage[1], edge)
-    added_whole_coverage = RemoveEdge(added_whole_coverage, edge)
-    added_paired_whole_coverage = RemoveEdge(added_paired_whole_coverage, edge)
-    added_host_whole_coverage = RemoveEdge(added_host_whole_coverage, edge)
-    phage_hybrid_coverage[0] = RemoveEdge(phage_hybrid_coverage[0], edge)
-    phage_hybrid_coverage[1] = RemoveEdge(phage_hybrid_coverage[1], edge)
-    host_whole_coverage[0] = RemoveEdge(host_whole_coverage[0], edge)
-    host_whole_coverage[1] = RemoveEdge(host_whole_coverage[1], edge)
-    host_hybrid_coverage[0] = RemoveEdge(host_hybrid_coverage[0], edge)
-    host_hybrid_coverage[1] = RemoveEdge(host_hybrid_coverage[1], edge)
-    refseq = RemoveEdge(refseq, edge)
-    if host != "":
-        hostseq = RemoveEdge(hostseq, edge)
-    gen_len = len(refseq)
-    host_len = len(hostseq)
-    if test == "DL":
-        gen_len = 100000
-
-    # READS Total, Used and Lost
-    used_reads, lost_reads, lost_perc = usedReads(termini_coverage, reads_tested)
-
-    # PIC Max
-    picMaxPlus, picMaxMinus, TopFreqH = picMax(termini_coverage, 5)
-    picMaxPlus_norm, picMaxMinus_norm, TopFreqH_norm = picMax(termini_coverage_norm, 5)
-    picMaxPlus_host, picMaxMinus_host, TopFreqH_host = picMax(host_whole_coverage, 5)
-
-    ### ANALYSIS
-
-    ## Close Peaks
-    picMaxPlus, picOUT_forw = RemoveClosePicMax(picMaxPlus, gen_len, surrounding)
-    picMaxMinus, picOUT_rev = RemoveClosePicMax(picMaxMinus, gen_len, surrounding)
-    picMaxPlus_norm, picOUT_norm_forw = RemoveClosePicMax(picMaxPlus_norm, gen_len, surrounding)
-    picMaxMinus_norm, picOUT_norm_rev = RemoveClosePicMax(picMaxMinus_norm, gen_len, surrounding)
-
-    termini_coverage_close = termini_coverage[:]
-    termini_coverage_close[0], picOUT_forw = addClosePic(termini_coverage[0], picOUT_forw)
-    termini_coverage_close[1], picOUT_rev = addClosePic(termini_coverage[1], picOUT_rev)
-
-    termini_coverage_norm_close = termini_coverage_norm[:]
-    termini_coverage_norm_close[0], picOUT_norm_forw = addClosePic(termini_coverage_norm[0], picOUT_norm_forw, 1)
-    termini_coverage_norm_close[1], picOUT_norm_rev = addClosePic(termini_coverage_norm[1], picOUT_norm_rev, 1)
-
-    ## Statistical Analysis
-    picMaxPlus_norm_close, picMaxMinus_norm_close, TopFreqH_norm = picMax(termini_coverage_norm_close, 5)
-    phage_norm, phage_plus_norm, phage_minus_norm = test_pics_decision_tree(paired_whole_coverage, termini_coverage,
-                                                                            termini_coverage_norm,
-                                                                            termini_coverage_norm_close)
-    # VL: comment that since the 2 different conditions lead to the execution of the same piece of code...
-    # if paired != "":
-    #     phage_norm, phage_plus_norm, phage_minus_norm = test_pics_decision_tree(paired_whole_coverage, termini_coverage,
-    #                                                                             termini_coverage_norm,
-    #                                                                             termini_coverage_norm_close)
-    # else:
-    #     phage_norm, phage_plus_norm, phage_minus_norm = test_pics_decision_tree(whole_coverage, termini_coverage,
-    #                                                                             termini_coverage_norm,
-    #                                                                             termini_coverage_norm_close)
-
-
-    ## LI Analysis
-    picMaxPlus_close, picMaxMinus_close, TopFreqH = picMax(termini_coverage_close, 5)
-
-    R1, AveFreq = ratioR1(TopFreqH, used_reads, gen_len)
-    R2 = ratioR(picMaxPlus_close)
-    R3 = ratioR(picMaxMinus_close)
-
-    ArtPackmode, termini, forward, reverse = packMode(R1, R2, R3)
-    ArtOrient = orientation(picMaxPlus_close, picMaxMinus_close)
-    ArtcohesiveSeq, ArtPackmode = sequenceCohesive(ArtPackmode, refseq, picMaxPlus_close, picMaxMinus_close,
-                                                   gen_len / 2)
-
-    ### DECISION Process
-
-    # PEAKS Significativity
-    plus_significant = selectSignificant(phage_plus_norm, 1.0 / gen_len, limit_preferred)
-    minus_significant = selectSignificant(phage_minus_norm, 1.0 / gen_len, limit_preferred)
-
-    # DECISION
-    Redundant, Permuted, P_class, P_type, P_seqcoh, P_concat, P_orient, P_left, P_right, Mu_like = decisionProcess(
-        plus_significant, minus_significant, limit_fixed, gen_len, paired, insert, R1, list_hybrid, used_reads,
-        seed, phage_hybrid_coverage, Mu_threshold, refseq, hostseq)
-
-    ### Results
-    if len(refseq_liste) != 1:
-        if P_class == "-":
-            if P_left == "Random" and P_right == "Random":
-                P_class = "UNKNOWN"
-            else:
-                P_class = "NEW"
-        DR[P_class][checkReportTitle(refseq_name[results_pos])] = {"analysis_name": analysis_name, "seed": seed,
-                                                                 "added_whole_coverage": added_whole_coverage,
-                                                                 "Redundant": Redundant, "P_left": P_left,
-                                                                 "P_right": P_right, "Permuted": Permuted,
-                                                                 "P_orient": P_orient,
-                                                                 "termini_coverage_norm_close": termini_coverage_norm_close,
-                                                                 "picMaxPlus_norm_close": picMaxPlus_norm_close,
-                                                                 "picMaxMinus_norm_close": picMaxMinus_norm_close,
-                                                                 "gen_len": gen_len, "tot_reads": tot_reads,
-                                                                 "P_seqcoh": P_seqcoh,
-                                                                 "phage_plus_norm": phage_plus_norm,
-                                                                 "phage_minus_norm": phage_minus_norm,
-                                                                 "ArtPackmode": ArtPackmode, "termini": termini,
-                                                                 "forward": forward, "reverse": reverse,
-                                                                 "ArtOrient": ArtOrient,
-                                                                 "ArtcohesiveSeq": ArtcohesiveSeq,
-                                                                 "termini_coverage_close": termini_coverage_close,
-                                                                 "picMaxPlus_close": picMaxPlus_close,
-                                                                 "picMaxMinus_close": picMaxMinus_close,
-                                                                 "picOUT_norm_forw": picOUT_norm_forw,
-                                                                 "picOUT_norm_rev": picOUT_norm_rev,
-                                                                 "picOUT_forw": picOUT_forw,
-                                                                 "picOUT_rev": picOUT_rev, "lost_perc": lost_perc,
-                                                                 "ave_whole_cov": ave_whole_cov, "R1": R1, "R2": R2,
-                                                                 "R3": R3, "host": host, "host_len": host_len,
-                                                                 "host_whole_coverage": host_whole_coverage,
-                                                                 "picMaxPlus_host": picMaxPlus_host,
-                                                                 "picMaxMinus_host": picMaxMinus_host,
-                                                                 "surrounding": surrounding, "drop_cov": drop_cov,
-                                                                 "paired": paired, "insert": insert,
-                                                                 "phage_hybrid_coverage": phage_hybrid_coverage,
-                                                                 "host_hybrid_coverage": host_hybrid_coverage,
-                                                                 "added_paired_whole_coverage": added_paired_whole_coverage,
-                                                                 "Mu_like": Mu_like, "test_run": test_run,
-                                                                 "P_class": P_class, "P_type": P_type,
-                                                                 "P_concat": P_concat,
-                                                                 "idx_refseq_in_list": results_pos}
-
-        if DR_path!=None: # multi machine cluster mode.
-            P_class_dir=os.path.join(DR_path,P_class)
-            if os.path.exists(P_class_dir):
-                if not os.path.isdir(P_class_dir):
-                    raise RuntimeError("P_class_dir is not a directory")
-            else:
-                os.mkdir(P_class_dir)
-            import pickle
-            fic_name=os.path.join(P_class_dir,checkReportTitle(refseq_name[results_pos]))
-            items_to_save=(analysis_name,seed,added_whole_coverage,Redundant,P_left,P_right,Permuted, \
-                           P_orient,termini_coverage_norm_close,picMaxPlus_norm_close,picMaxMinus_norm_close, \
-                           gen_len,tot_reads,P_seqcoh,phage_plus_norm,phage_minus_norm,ArtPackmode,termini, \
-                           forward,reverse,ArtOrient,ArtcohesiveSeq,termini_coverage_close,picMaxPlus_close, \
-                           picMaxMinus_close,picOUT_norm_forw,picOUT_norm_rev,picOUT_forw,picOUT_rev, \
-                           lost_perc,ave_whole_cov,R1,R2,R3,host,host_len,host_whole_coverage,picMaxPlus_host, \
-                           picMaxMinus_host,surrounding,drop_cov,paired, insert,phage_hybrid_coverage, \
-                           host_hybrid_coverage,added_paired_whole_coverage,Mu_like,test_run,P_class,P_type,\
-                           P_concat,results_pos)
-            with open(fic_name,'wb') as f:
-                pickle.dump(items_to_save,f)
-
-    return SeqStats(P_class, P_left, P_right, P_type, P_orient, ave_whole_cov, phage_plus_norm, phage_minus_norm, ArtcohesiveSeq, P_seqcoh, Redundant, Mu_like, \
-        added_whole_coverage, Permuted, termini_coverage_norm_close, picMaxPlus_norm_close, picMaxMinus_norm_close, gen_len, termini_coverage_close, \
-        ArtPackmode, termini, forward, reverse, ArtOrient, picMaxPlus_close, picMaxMinus_close, picOUT_norm_forw, picOUT_norm_rev, picOUT_forw, picOUT_rev, \
-        lost_perc, R1, R2, R3, picMaxPlus_host, picMaxMinus_host, drop_cov, added_paired_whole_coverage, P_concat)
diff --git a/_modules/functions_PhageTerm.py b/_modules/functions_PhageTerm.py
deleted file mode 100644
index a20be630d08f0d9faf60941029b21af75adaf017..0000000000000000000000000000000000000000
--- a/_modules/functions_PhageTerm.py
+++ /dev/null
@@ -1,1520 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-## @file functions_PhageTerm.py
-#
-#  This file is a part of PhageTerm software
-#  A tool to determine phage termini and packaging strategy
-#  and other useful information using raw sequencing reads.
-#  (This program works with sequencing reads from randomly
-#  sheared DNA library preparations such as Illumina TruSeq paired-end or similar)
-#
-#  ----------------------------------------------------------------------
-#  Copyright (C) 2017 Julian Garneau
-#
-#   This program is free software; you can redistribute it and/or modify
-#   it under the terms of the GNU General Public License as published by
-#   the Free Software Foundation; either version 3 of the License, or
-#   (at your option) any later version.
-#   <http://www.gnu.org/licenses/gpl-3.0.html>
-#
-#   This program is distributed in the hope that it will be useful,
-#   but WITHOUT ANY WARRANTY; without even the implied warranty of
-#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#   GNU General Public License for more details.
-#  ----------------------------------------------------------------------
-#
-#  @author Julian Garneau <julian.garneau@usherbrooke.ca>
-#  @author Marc Monot <marc.monot@pasteur.fr>
-#  @author David Bikard <david.bikard@pasteur.fr>
-
- 
-### PYTHON Module
-# Base
-from __future__ import print_function
-
-import sys
-
-import os
-
-
-import matplotlib
-matplotlib.use('Agg')
-import matplotlib.pyplot as plt
-from matplotlib import patches
-from matplotlib.path import Path
-
-import numpy as np
-import pandas as pd
-
-# String
-import io
-import gzip
-
-# PDF report building
-import time
-from reportlab.lib.enums import TA_JUSTIFY, TA_CENTER, TA_LEFT, TA_RIGHT
-from reportlab.lib.pagesizes import letter, landscape
-from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image, Table, TableStyle, PageBreak
-from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
-from reportlab.lib.units import inch
-from reportlab.lib import colors
-from reportlab.lib.utils import ImageReader
-
-from _modules.utilities import reverseComplement,hybridCoverage,applyCoverage,correctEdge
-from _modules.common_readsCoverage_processing import picMax
-from _modules.readsCoverage_res import RCRes, RCCheckpoint_handler,RCWorkingS
-
-### UTILITY function
-def chunks(l, n):
-    """Yield n successive chunks from l."""
-    newn = int(1.0 * len(l) / n + 0.5)
-    for i in range(0, n-1):
-        yield l[i*newn:i*newn+newn]
-    yield l[n*newn-newn:]
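-
-# Example (sketch): list(chunks(list(range(10)), 3)) ->
-#   [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
-# i.e. n-1 chunks of size round(len(l)/n) and the remainder in the last one.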
-
-##
-# Initializes working structure for readsCoverage
-def init_ws(p_res,refseq,hostseq):
-    gen_len = len(refseq)
-    host_len = len(hostseq)
-    k = count_line = 0
-    if p_res==None:
-        termini_coverage      = np.array([gen_len*[0], gen_len*[0]])
-        whole_coverage        = np.array([gen_len*[0], gen_len*[0]])
-        paired_whole_coverage = np.array([gen_len*[0], gen_len*[0]])
-        phage_hybrid_coverage = np.array([gen_len*[0], gen_len*[0]])
-        host_hybrid_coverage  = np.array([host_len*[0], host_len*[0]])
-        host_whole_coverage   = np.array([host_len*[0], host_len*[0]])
-        list_hybrid           = np.array([0,0])
-        insert                = []
-        paired_missmatch      = 0
-        read_match            = 0
-    else:
-        termini_coverage=p_res.interm_res.termini_coverage
-        whole_coverage=p_res.interm_res.whole_coverage
-        paired_whole_coverage=p_res.interm_res.paired_whole_coverage
-        phage_hybrid_coverage=p_res.interm_res.phage_hybrid_coverage
-        host_hybrid_coverage=p_res.interm_res.host_hybrid_coverage
-        host_whole_coverage=p_res.interm_res.host_whole_coverage
-        list_hybrid=p_res.interm_res.list_hybrid
-        insert=p_res.interm_res.insert
-        paired_missmatch=p_res.interm_res.paired_mismatch
-        k=int(p_res.interm_res.reads_tested)
-        #count_line=p_res.count_line-1 # do that because readsCoverage will start by incrementing it of 1
-        read_match=p_res.read_match
-    return gen_len,host_len,termini_coverage,whole_coverage,paired_whole_coverage,phage_hybrid_coverage,host_hybrid_coverage, \
-           host_whole_coverage,list_hybrid,insert,paired_missmatch,k,count_line,read_match #TODO refactor that.
-
-
-
-## COVERAGE Starting and Whole function
-#
-# VL: use debug mode to keep track of which reads matched and which did not. For reads that matched, we want to know whether the match is at the beginning of the read, at the end, or on its reverse complement.
-# My aim is to compare the results with those of the GPU version.
-def readsCoverage(inRawDArgs,refseq,inDArgs,fParms,return_dict, core_id,line_start,line_end,tParms,\
-                  chk_handler,idx_refseq,logger=None):
-    """Calculate whole coverage and first base coverage. """
-
-    p_res=chk_handler.load(core_id,idx_refseq)
-    gen_len,host_len,termini_coverage, whole_coverage, paired_whole_coverage, phage_hybrid_coverage, host_hybrid_coverage,\
-    host_whole_coverage, list_hybrid, insert, paired_missmatch, k, count_line, read_match=init_ws(p_res, refseq, inDArgs.hostseq)
-    if logger!=None:
-        logger.add_rw(p_res)
-    test_read_seq = match = 0
-    # Timer
-    if core_id == (tParms.core-1):
-        sys.stdout.write("  0.0 %")
-        sys.stdout.flush()
-    
-    # Mapping
-    filin = gzip.open(inRawDArgs.fastq, "rt") if inRawDArgs.fastq.endswith(".gz") else open(inRawDArgs.fastq)
-    line             = filin.readline()
-    
-    if inRawDArgs.paired != "":
-        filin_paired = gzip.open(inRawDArgs.paired, "rt") if inRawDArgs.paired.endswith(".gz") else open(inRawDArgs.paired)
-        line_paired  = filin_paired.readline()
-    count_line_tmp=0
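-    # Fast-forward the input to the line where the previous pass stopped
-    # (this loop is a no-op when count_line is 0, e.g. on a fresh run).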
-    while line and count_line!=count_line_tmp:
-        count_line_tmp += 1
-        line = filin.readline()
-    while line:
-        count_line+=1
-        if count_line//4 <= line_start:
-            test_read_seq = 0
-        if count_line//4 > line_end:
-            break
-
-        if test_read_seq:
-            k += 1
-            # Read sequence
-            read = line.split("\n")[0].split("\r")[0]
-            line = filin.readline()
-            
-            if inRawDArgs.paired != "":
-                read_paired = line_paired.split("\n")[0].split("\r")[0]
-                line_paired = filin_paired.readline()
-            
-            readlen = len(read)
-            if readlen < fParms.seed:
-                if logger!=None:
-                    print("CPU rejecting read",k)
-                continue
-            corlen = readlen-fParms.seed
-
-            if logger!=None:
-                print("CPU processing read: ",k,read, reverseComplement(read))
-                logger.newRmInfo(k)
-            
-            ### Match sense + (multiple, random pick one)
-            #print("read[:fParms.seed]=",read[:fParms.seed])
-            matchPplus_start, matchPplus_end = applyCoverage(read[:fParms.seed], refseq)
-            
-            ## Phage
-            if matchPplus_start != -1:
-                if logger!=None:
-                    print("CPU found: ",read[:fParms.seed])
-                    logger.rMatch("mstart")
-                match = 1
-                termini_coverage[0][matchPplus_start]+=1
-                position_end = matchPplus_end+corlen
-                
-                # whole coverage
-                for i in range(matchPplus_start, min(gen_len,position_end)):
-                    whole_coverage[0][i]+=1
-                
-                # Paired-read
-                if inRawDArgs.paired != "":
-                    matchPplus_start_paired, matchPplus_end_paired = applyCoverage(reverseComplement(read_paired)[-fParms.seed:], refseq)
-                    insert_length = matchPplus_end_paired - matchPplus_start
-                    if insert_length > 0 and insert_length < fParms.insert_max:
-                        position_end = matchPplus_end_paired
-                        insert.append(insert_length)
-                    else:
-                        paired_missmatch += 1
-                        # Paired hybrid
-                        if inDArgs.hostseq != "" and matchPplus_start_paired == -1:
-                            matchHplus_start, matchHplus_end = applyCoverage(read_paired[:fParms.seed], inDArgs.hostseq)
-                            if matchHplus_start != -1:
-                                list_hybrid[0] += 1
-                                phage_hybrid_coverage[0] = hybridCoverage(read, refseq, phage_hybrid_coverage[0], matchPplus_start, min(gen_len,matchPplus_end+corlen) )
-                                host_hybrid_coverage[0]  = hybridCoverage(read_paired, inDArgs.hostseq, host_hybrid_coverage[0], matchHplus_start, min(host_len,matchHplus_end+corlen) )
-                            else:
-                                matchHminus_start, matchHminus_end = applyCoverage(reverseComplement(read_paired)[:fParms.seed], inDArgs.hostseq)
-                                if matchHminus_start != -1:
-                                    list_hybrid[0] += 1
-                                    phage_hybrid_coverage[0] = hybridCoverage(read, refseq, phage_hybrid_coverage[0], matchPplus_start, min(gen_len,matchPplus_end+corlen) )
-                                    host_hybrid_coverage[1]  = hybridCoverage(reverseComplement(read_paired), inDArgs.hostseq, host_hybrid_coverage[1], matchHminus_start, min(host_len,matchHminus_end+corlen) )
-
-                # Single hybrid
-                elif inDArgs.hostseq != "":
-                    matchPFplus_start, matchPFplus_end = applyCoverage(read[-fParms.seed:], refseq)
-                    if matchPFplus_start == -1:
-                        matchHplus_start, matchHplus_end = applyCoverage(read[-fParms.seed:], inDArgs.hostseq)
-                        if matchHplus_start != -1:
-                            list_hybrid[0] += 1
-                            phage_hybrid_coverage[0] = hybridCoverage(read, refseq, phage_hybrid_coverage[0], matchPplus_start, min(gen_len,matchPplus_end+corlen) )
-                            host_hybrid_coverage[0]  = hybridCoverage(read, inDArgs.hostseq, host_hybrid_coverage[0], matchHplus_start, min(host_len,matchHplus_end+corlen) )
-                        else:
-                            matchHminus_start, matchHminus_end = applyCoverage(reverseComplement(read)[-fParms.seed:], inDArgs.hostseq)
-                            if matchHminus_start != -1:
-                                list_hybrid[0] += 1
-                                phage_hybrid_coverage[0] = hybridCoverage(read, refseq, phage_hybrid_coverage[0], matchPplus_start, min(gen_len,matchPplus_end+corlen) )
-                                host_hybrid_coverage[1]  = hybridCoverage(reverseComplement(read), inDArgs.hostseq, host_hybrid_coverage[1], matchHminus_start, min(host_len,matchHminus_end+corlen) )
-
-                # whole coverage
-                for i in range(matchPplus_start, min(gen_len,position_end)):
-                    paired_whole_coverage[0][i]+=1
-            
-            ### if no match sense +, then test sense -
-            if not match:
-                matchPminus_start, matchPminus_end = applyCoverage(reverseComplement(read)[-fParms.seed:], refseq)
-                
-                ## Phage
-                if matchPminus_end != -1:
-                    if logger != None:
-                        print("CPU found: ",reverseComplement(read)[-fParms.seed:]," from ",reverseComplement(read))
-                        logger.rMatch("mrcplstart")
-                    match = 1
-                    termini_coverage[1][matchPminus_end-1]+=1
-                    position_start = matchPminus_start-corlen
-                    
-                    # whole coverage
-                    for i in range(max(0,position_start), matchPminus_end):
-                        whole_coverage[1][i]+=1
-                    
-                    # Paired-read
-                    if inRawDArgs.paired != "":
-                        matchPminus_start_paired, matchPminus_end_paired = applyCoverage(read_paired[:fParms.seed], refseq)
-                        insert_length = matchPminus_end - matchPminus_start_paired
-                        if insert_length > 0 and insert_length < fParms.insert_max:
-                            position_start = matchPminus_start_paired
-                            insert.append(insert_length)
-                        else:
-                            paired_missmatch += 1
-                            # Paired hybrid
-                            if inDArgs.hostseq != "" and matchPminus_start_paired == -1:
-                                matchHplus_start, matchHplus_end = applyCoverage(read_paired[:fParms.seed], inDArgs.hostseq)
-                                if matchHplus_start != -1:
-                                    list_hybrid[1] += 1
-                                    phage_hybrid_coverage[1] = hybridCoverage(reverseComplement(read), refseq, phage_hybrid_coverage[1], matchPminus_start, min(gen_len,matchPminus_end+corlen) )
-                                    host_hybrid_coverage[0]  = hybridCoverage(read_paired, inDArgs.hostseq, host_hybrid_coverage[0], matchHplus_start, min(host_len,matchHplus_end+corlen) )
-
-                                else:
-                                    matchHminus_start, matchHminus_end = applyCoverage(reverseComplement(read_paired)[-fParms.seed:], inDArgs.hostseq)
-                                    if matchHminus_start != -1:
-                                        list_hybrid[1] += 1
-                                        phage_hybrid_coverage[1] = hybridCoverage(reverseComplement(read), refseq, phage_hybrid_coverage[1], matchPminus_start, min(gen_len,matchPminus_end+corlen) )
-                                        host_hybrid_coverage[1]  = hybridCoverage(reverseComplement(read_paired), inDArgs.hostseq, host_hybrid_coverage[1], matchHminus_start, min(host_len,matchHminus_end+corlen) )
-            
-                    # Single hybrid
-                    elif inDArgs.hostseq != "":
-                        matchPRplus_start, matchPRplus_end = applyCoverage(reverseComplement(read)[:fParms.seed], refseq)
-                        if matchPRplus_start == -1:
-                            matchHplus_start, matchHplus_end = applyCoverage(read[:fParms.seed], inDArgs.hostseq)
-                            if matchHplus_start != -1:
-                                list_hybrid[1] += 1
-                                phage_hybrid_coverage[1] = hybridCoverage(reverseComplement(read), refseq, phage_hybrid_coverage[1], matchPminus_start, min(gen_len,matchPminus_end+corlen) )
-                                host_hybrid_coverage[0]  = hybridCoverage(read, inDArgs.hostseq, host_hybrid_coverage[0], matchHplus_start, min(host_len,matchHplus_end+corlen) )
-                            else:
-                                matchHminus_start, matchHminus_end = applyCoverage(reverseComplement(read)[:fParms.seed], inDArgs.hostseq)
-                                if matchHminus_start != -1:
-                                    list_hybrid[1] += 1
-                                    phage_hybrid_coverage[1] = hybridCoverage(reverseComplement(read), refseq, phage_hybrid_coverage[1], matchPminus_start, min(gen_len,matchPminus_end+corlen) )
-                                    host_hybrid_coverage[1]  = hybridCoverage(reverseComplement(read), inDArgs.hostseq, host_hybrid_coverage[1], matchHminus_start, min(host_len,matchHminus_end+corlen) )
-
-                    # whole coverage
-                    for i in range(max(0,position_start), matchPminus_end):
-                        paired_whole_coverage[1][i]+=1
-            
-            ### if no match on Phage, test Host
-            if not match:
-                matchHplus_start, matchHplus_end = applyCoverage(read[:fParms.seed], inDArgs.hostseq)
-                if matchHplus_start != -1:
-                    for i in range(matchHplus_start, min(host_len,matchHplus_end+corlen)):
-                        host_whole_coverage[0][i]+=1
-                else:
-                    matchHminus_start, matchHminus_end = applyCoverage(reverseComplement(read)[-fParms.seed:], inDArgs.hostseq)
-                    if matchHminus_end != -1:
-                        for i in range(max(0,matchHminus_start-corlen), matchHminus_end):
-                            host_whole_coverage[1][i]+=1
-
-            # TEST limit_coverage
-            read_match += match*readlen
-
-            match = test_read_seq = 0
-            # Timer
-            if core_id == (tParms.core-1):
-                if k%1000 == 0:
-                    sys.stdout.write("\b\b\b\b\b\b\b\b\b%3.1f %%" %( float(read_match/gen_len) / tParms.limit_coverage * 100 ))
-                    sys.stdout.flush()
-
-            chk_handler.check(count_line,core_id,idx_refseq,termini_coverage,whole_coverage,paired_whole_coverage,\
-                 phage_hybrid_coverage, host_hybrid_coverage, \
-                 host_whole_coverage,list_hybrid,insert,paired_missmatch,k,read_match) # maybe time to create checkpoint
-
-        else:
-            if line[0] == "@":
-                test_read_seq = 1
-            
-            line = filin.readline()
-            if inRawDArgs.paired != "":
-                line_paired = filin_paired.readline()
-
-            # TEST limit_coverage
-            if (read_match/gen_len) > tParms.limit_coverage:
-                line = 0
-            
-
-    if core_id == (tParms.core-1):
-        sys.stdout.write("\b\b\b\b\b\b\b\b\b%3.1f %%" %( 100 ))
-        sys.stdout.flush()
-
-    # Close file
-    filin.close()
-    if inRawDArgs.paired != "":
-        filin_paired.close()
-
-
-    # Correct EDGE coverage
-    if sum(termini_coverage[0]) + sum(termini_coverage[1]) == 0 and not fParms.virome:
-        print("WARNING: No read Match, please check your fastq file")
-    
-    termini_coverage       = correctEdge(termini_coverage, fParms.edge)
-    #paired_whole_coverage = correctEdge(whole_coverage, fParms.edge) #TODO: discuss with Julian and Max about the PE issue that Max reported.
-    whole_coverage         = correctEdge(whole_coverage, fParms.edge)
-    phage_hybrid_coverage = correctEdge(phage_hybrid_coverage, fParms.edge)
-    if inDArgs.hostseq != "":
-        host_whole_coverage    = correctEdge(host_whole_coverage, fParms.edge)
-        host_hybrid_coverage   = correctEdge(host_hybrid_coverage, fParms.edge)
-
-    if return_dict!=None and tParms.dir_cov_mm==None:
-        return_dict[core_id] = [termini_coverage, whole_coverage, paired_whole_coverage, phage_hybrid_coverage, host_hybrid_coverage, host_whole_coverage, list_hybrid, np.array(insert), paired_missmatch, k]
-    elif return_dict==None and tParms.dir_cov_mm!=None:
-        insert = np.array(insert)
-        fic_name = os.path.join(tParms.dir_cov_mm, "coverage" + str(idx_refseq) + "_" + str(core_id))
-        res=RCRes(termini_coverage,whole_coverage,paired_whole_coverage,\
-                 phage_hybrid_coverage, host_hybrid_coverage, \
-                 host_whole_coverage,list_hybrid,insert,paired_missmatch,k)
-        res.save(fic_name)
-    else:
-        print("Error: readsCoverage must be used either with directory name or return_dict")
-    chk_handler.end(core_id)
-
-    return
-
-
-
-### IMAGE Functions
-def GraphCov(termini_coverage, picMaxPlus, picMaxMinus, phagename, norm, draw, hybrid = 0):
-    """Produces a plot with termini coverage values."""
-    # Remove old plot
-    plt.clf()
-    plt.cla()
-    plt.close()
-    # Create figure
-    plt.figure(1)
-    term_len = len(termini_coverage[0])
-    term_range = list(range(term_len))
-    # MOP: not totally sure what's going on here with the plot formatting
-    # but I refactored this out as it was getting complicated.
-    # Someone who understands the code better might put in more informative var names.
-    zipper = list(zip(*picMaxPlus))
-    max_first_zipper = max(np.array(zipper[0]))
-    if norm == 1:
-        ylim = 1.2
-    elif hybrid == 1:
-        offset = 0.2*(max_first_zipper) + 1
-        ylim = max_first_zipper + offset
-    else:
-        max_minus = max(np.array(list(zip(*picMaxMinus))[0]))
-        offset = 0.2*max(max_first_zipper, max_minus)
-        ylim = max(max_first_zipper, max_minus) + offset
-    # Strand (+)
-    plt.subplot(211)
-    if norm == 1:
-        plt.scatter(term_range,termini_coverage[0])   
-    else:
-        plt.plot(termini_coverage[0],linewidth=2)
-    plt.title('strand (+)')
-    plt.ylabel('')
-    # Axes
-    axes = plt.gca()
-    axes.set_ylim([0,ylim])
-    # Maximum
-    x_strandplus = np.array(list(zip(*picMaxPlus))[1])
-    y_strandplus = np.array(list(zip(*picMaxPlus))[0])
-    # Plot
-    plt.plot(x_strandplus, y_strandplus, 'ro')
-    if norm == 1:
-        axes.axhline(y=0.5, xmin=0, xmax=x_strandplus, color='grey', linestyle='dashed', linewidth=1.5)
-        axes.axhline(y=1.0, xmin=0, xmax=x_strandplus, color='grey', linestyle='dashed', linewidth=1.5)
-    # Annotation
-    for i,j in zip(x_strandplus,y_strandplus):
-        plt.text(i+(term_len*0.03), j, str(i+1), fontsize=15, bbox=dict(boxstyle='round', facecolor='white', alpha=1))
-    # Plot Option
-    plt.margins(0.1)
-    plt.locator_params(axis = 'x', nbins = 10)
-    plt.locator_params(axis = 'y', nbins = 3)
-    plt.xticks(rotation=75)
-    # Strand (-)
-    plt.subplot(212)
-    if norm == 1:
-        plt.scatter(term_range,termini_coverage[1])
-    else:
-        plt.plot(termini_coverage[1],linewidth=2)
-    plt.title('strand (-)')
-    plt.ylabel('')
-    # Axes
-    if hybrid == 1:
-        offset = 0.2*(max_first_zipper) + 1
-        ylim = max_first_zipper + offset
-    axes = plt.gca()
-    axes.set_ylim([0,ylim])
-    # Maximum
-    x_strandminus = np.array(list(zip(*picMaxMinus))[1])
-    y_strandminus = np.array(list(zip(*picMaxMinus))[0])
-    # Plot
-    plt.plot(x_strandminus, y_strandminus, 'ro')
-    if norm == 1:
-        axes.axhline(y=0.5, xmin=0, xmax=x_strandminus, color='grey', linestyle='dashed', linewidth=1.5)
-        axes.axhline(y=1.0, xmin=0, xmax=x_strandminus, color='grey', linestyle='dashed', linewidth=1.5)
-    # Annotation
-    for i,j in zip(x_strandminus,y_strandminus):
-        plt.text(i+(term_len*0.03), j, str(i+1), fontsize=15, bbox=dict(boxstyle='round', facecolor='white', alpha=1))
-    # Plot Option
-    plt.margins(0.1)
-    plt.locator_params(axis = 'x', nbins = 10)
-    plt.locator_params(axis = 'y', nbins = 3)
-    plt.xticks(rotation=75)
-    # Plot Adjustments
-    plt.tight_layout()
-    # Draw graph
-    if draw:
-        plt.savefig("%s_TCov.png" % phagename, dpi=200)
-    fig = plt.figure(1)
-    return fig
-
-def GraphWholeCov(added_whole_coverage, phagename, draw, P_left = "", P_right = "", pos_left = 0, pos_right = 0, graphZoom = 0, title = "WHOLE COVERAGE"):
-    """Produces a plot with whole coverage values."""
-    # Remove old plot
-    plt.clf()
-    plt.cla()
-    plt.close()
-    # Create figure
-    offset = 0.2*(max(added_whole_coverage))
-    ylim = max(added_whole_coverage) + offset
-    # Cumulative both strands
-    plt.figure(figsize=(15,8))
-    plt.plot(added_whole_coverage,linewidth=2)
-    plt.title(title)
-    # Axes
-    axes = plt.gca()
-    axes.set_ylim([0,ylim])
-    # Plot Option
-    plt.margins(0.1)
-    plt.locator_params(axis = 'x', nbins = 10)
-    plt.xticks(rotation=75)
-    # Termini vertical dashed line display
-    if graphZoom and isinstance(P_left, np.integer):
-        plt.axvline(x=pos_left, ymin=0, ymax=ylim, color='red', zorder=2, linestyle='dashed', linewidth=1)
-    if graphZoom and isinstance(P_right, np.integer):
-        plt.axvline(x=pos_right, ymin=0, ymax=ylim, color='green', zorder=2, linestyle='dashed', linewidth=1)
-    # Draw graph
-    if draw:
-        plt.savefig("%s_plot_WCov.png" % phagename, dpi=200)
-    fig = plt.figure(1)
-    return fig
-
-def GraphLogo(P_class, P_left, P_right, draw):
-    """Produce logo."""
-    # Remove old plot
-    plt.clf()
-    plt.cla()
-    plt.close()
-    # Create figure
-    plt.figure(figsize=(10,10))
-    #axes = plt.add_subplot(111)
-    axes = plt.gca()
-    axes.set_frame_on(False)
-    axes.xaxis.set_visible(False)
-    axes.yaxis.set_visible(False)
-    # Frame
-    axes.add_artist(patches.Rectangle((0.1, 0.1), 0.8, 0.8, edgecolor = 'black', fill = False, linewidth = 15))
-
-    if P_class == "Headful (pac)":
-        # Text
-        axes.text(0.17, 0.7, r"Headful (pac)", fontsize=50, fontweight='bold')
-        # PAC (blue line)
-        axes.axhline(y=0.35, xmin=0.2, xmax=0.8, color='blue', linewidth=15)
-        # PAC (red line)
-        axes.axvline(x=0.19, ymin=0.30, ymax=0.40, color='red', linewidth=10)
-        axes.axvline(x=0.42, ymin=0.30, ymax=0.40, color='red', linewidth=10)
-        axes.axvline(x=0.65, ymin=0.30, ymax=0.40, color='red', linewidth=10)
-        # PAC (Arrow)
-        axes.axvline(x=0.19, ymin=0.45, ymax=0.55, color='red', linewidth=15)
-        axes.arrow(0.19, 0.55, 0.07, 0, color='red', linewidth=15, head_width=0.07, head_length=0.1)
-        
-    elif P_class == "COS (5')":
-        # Text
-        axes.text(0.3, 0.7, r"COS (5')", fontsize=50, fontweight='bold')
-        axes.add_artist(patches.Ellipse(xy=(0.5,0.4), width=0.5, height=0.35 , edgecolor = 'blue', fill=False, lw=15))
-        axes.add_artist(patches.Ellipse(xy=(0.5,0.4), width=0.58, height=0.43 , edgecolor = 'blue', fill=False, lw=15))
-        axes.add_artist(patches.Rectangle((0.4, 0.5), 0.20, 0.20, edgecolor = 'white', facecolor = 'white', fill = True))
-        axes.axhline(y=0.56, xmin=0.415, xmax=0.48, color='red', linewidth=16)
-        axes.axhline(y=0.601, xmin=0.52, xmax=0.585, color='red', linewidth=16)
-
-    elif P_class == "COS (3')":
-        # Text
-        axes.text(0.3, 0.7, r"COS (3')", fontsize=50, fontweight='bold')
-        axes.add_artist(patches.Ellipse(xy=(0.5,0.4), width=0.5, height=0.35 , edgecolor = 'blue', fill=False, lw=15))
-        axes.add_artist(patches.Ellipse(xy=(0.5,0.4), width=0.58, height=0.43 , edgecolor = 'blue', fill=False, lw=15))
-        axes.add_artist(patches.Rectangle((0.4, 0.5), 0.20, 0.20, edgecolor = 'white', facecolor = 'white', fill = True))
-        axes.axhline(y=0.601, xmin=0.415, xmax=0.48, color='red', linewidth=16)
-        axes.axhline(y=0.56, xmin=0.52, xmax=0.585, color='red', linewidth=16)
-
-    elif P_class == "COS":
-        # Text
-        axes.text(0.4, 0.7, r"COS", fontsize=50, fontweight='bold')
-        axes.add_artist(patches.Ellipse(xy=(0.5,0.4), width=0.5, height=0.35 , edgecolor = 'blue', fill=False, lw=15))
-        axes.add_artist(patches.Ellipse(xy=(0.5,0.4), width=0.58, height=0.43 , edgecolor = 'blue', fill=False, lw=15))
-        axes.add_artist(patches.Rectangle((0.4, 0.5), 0.20, 0.20, edgecolor = 'white', facecolor = 'white', fill = True))
-
-    elif P_class == "DTR (short)":
-        # Text
-        axes.text(0.22, 0.7, r"DTR (short)", fontsize=50, fontweight='bold')
-
-        verts = [(0.5, 0.5), (0.9, 0.4), (0.9, 0.3), (0.5,0.2)]
-        codes = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'blue', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.5, 0.2), (0.1, 0.30), (0.1, 0.45), (0.5,0.55)]
-        codes = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'blue', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.5, 0.55), (0.52, 0.545), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.56, 0.536), (0.58, 0.530), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.5, 0.50), (0.56, 0.480), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'white', lw=20)
-        axes.add_patch(patch)
-
-        verts = [(0.5, 0.50), (0.52, 0.495), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.56, 0.486), (0.58, 0.480), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-    elif P_class == "DTR (long)":
-        # Text
-        axes.text(0.25, 0.7, r"DTR (long)", fontsize=50, fontweight='bold')
-        verts = [(0.5, 0.5), (0.9, 0.4), (0.9, 0.3), (0.5,0.2)]
-        codes = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'blue', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.5, 0.2), (0.1, 0.30), (0.1, 0.45), (0.5,0.55)]
-        codes = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'blue', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.5, 0.55), (0.52, 0.545), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.56, 0.536), (0.58, 0.530), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.62, 0.521), (0.64, 0.516), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.68, 0.507), (0.70, 0.501), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.5, 0.50), (0.65, 0.460), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'white', lw=25)
-        axes.add_patch(patch)
-
-        verts = [(0.5, 0.50), (0.52, 0.495), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.56, 0.486), (0.58, 0.480), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.62, 0.471), (0.64, 0.465), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-        verts = [(0.68, 0.456), (0.70, 0.450), (0, 0)]
-        codes = [Path.MOVETO, Path.LINETO, Path.CLOSEPOLY]
-        path = Path(verts, codes)
-        patch = patches.PathPatch(path, facecolor='none', edgecolor = 'red', lw=15)
-        axes.add_patch(patch)
-
-    elif P_class == "Mu-like":
-        # Text
-        axes.text(0.33, 0.7, r"Mu-like", fontsize=50, fontweight='bold')
-        axes.axhline(y=0.43, xmin=0.3, xmax=0.7, color='blue', linewidth=15)
-        axes.axhline(y=0.47, xmin=0.3, xmax=0.7, color='blue', linewidth=15)
-        axes.axhline(y=0.43, xmin=0.7, xmax=0.8, color='green', linewidth=15)
-        axes.axhline(y=0.47, xmin=0.7, xmax=0.8, color='green', linewidth=15)
-        axes.axhline(y=0.43, xmin=0.2, xmax=0.3, color='green', linewidth=15)
-        axes.axhline(y=0.47, xmin=0.2, xmax=0.3, color='green', linewidth=15)
-
-    elif P_left == "Random" and P_right == "Random":
-        # Text
-        axes.text(0.25, 0.7, r"UNKNOWN", fontsize=50, fontweight='bold')
-        axes.text(0.44, 0.3, r"?", fontsize=200, fontweight='bold')
-    else:
-        # Text
-        axes.text(0.4, 0.7, r"NEW", fontsize=50, fontweight='bold')
-        axes.text(0.44, 0.3, r"!", fontsize=200, fontweight='bold')
-
-    # Draw graph
-    if draw:
-        plt.savefig("%s_logo.png" % phagename, dpi=200)
-    fig = plt.figure(1)
-    return fig
-
-
-### OUTPUT Result files
-def exportDataSplit(sequence, split):
-    """Export sequence with split line length."""
-    seq = ""
-    for i in range((len(sequence)//split)+1):
-        seq += "".join(map(str,sequence[i*split:(i+1)*split])) + '\n'
-    return seq
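-# Quick illustration of the wrapping (hypothetical input): exportDataSplit("ACGT" * 40, 60)
-# returns three newline-terminated chunks: two of 60 characters and one of 40.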
-
-def ExportStatistics(phagename, whole_coverage, paired_whole_coverage, termini_coverage, phage_plus_norm, phage_minus_norm, paired, test_run):
-    """Export peaks statistics."""
-    if test_run:
-        return
-    export = pd.DataFrame()
-    # ORGANIZE Column
-    export["Position"]             = list(phage_plus_norm.sort_values("Position")["Position"])
-    if paired != "":
-        export["Coverage +"]       = paired_whole_coverage[0]
-    else:
-        export["Coverage +"]       = whole_coverage[0]
-    export["SPC +"]                = termini_coverage[0]
-    export["T +"]                  = [format(x/100.0,'0.2') for x in list(phage_plus_norm.sort_values("Position")["SPC_std"])]
-    export["T + (close)"]          = [format(x/100.0,'0.2') for x in list(phage_plus_norm.sort_values("Position")["SPC"])]
-    export["pvalue +"]             = [format(x,'0.2e') for x in list(phage_plus_norm.sort_values("Position")["pval_gamma"])]
-    export["padj +"]               = [format(x,'0.2e') for x in list(phage_plus_norm.sort_values("Position")["pval_gamma_adj"])]
-    if paired != "":
-        export["Coverage -"]       = whole_coverage[1]
-    else:
-        export["Coverage -"]       = paired_whole_coverage[1]
-    export["SPC -"]                = termini_coverage[1]
-    export["T -"]                  = [format(x/100.0,'0.2') for x in list(phage_minus_norm.sort_values("Position")["SPC_std"])]
-    export["T - (close)"]          = [format(x/100.0,'0.2') for x in list(phage_minus_norm.sort_values("Position")["SPC"])]
-    export["pvalue -"]             = [format(x,'0.2e') for x in list(phage_minus_norm.sort_values("Position")["pval_gamma"])]
-    export["padj -"]               = [format(x,'0.2e') for x in list(phage_minus_norm.sort_values("Position")["pval_gamma_adj"])]
-    with open(phagename + "_statistics.csv", "w") as filout:
-        filout.write(export.to_csv(index=False))
-    return
-    return
-
-def ExportCohesiveSeq(phagename, ArtcohesiveSeq, P_seqcoh, test_run, multi = 0):
-    """Export cohesive sequence of COS phages."""
-    if test_run:
-        return ""
-    if len(ArtcohesiveSeq) < 3 and len(P_seqcoh) < 3:
-        return ""
-    if len(ArtcohesiveSeq) < 20 and len(P_seqcoh) < 20:
-        export_text = "cohesive sequence"
-        out_name = phagename + "_cohesive-sequence.fasta"
-    else:
-        export_text = "direct terminal repeats sequence"
-        out_name = phagename + "_direct-term-repeats.fasta"
-    if multi:
-        if P_seqcoh != '':
-            return ">" + phagename + " " + export_text + " (Analysis: Statistics)\n" + exportDataSplit(P_seqcoh, 60)
-        return ""
-    # The context manager closes the file in every path, even when only one
-    # of the two sequences is written.
-    with open(out_name, "w") as filout:
-        if P_seqcoh != '':
-            filout.write(">" + phagename + " " + export_text + " (Analysis: Statistics)\n" + exportDataSplit(P_seqcoh, 60))
-        if ArtcohesiveSeq != '':
-            filout.write(">" + phagename + " " + export_text + " (Analysis: Li)\n" + exportDataSplit(ArtcohesiveSeq, 60))
-    return ""
-
-def ExportPhageSequence(phagename, P_left, P_right, refseq, P_orient, Redundant, Mu_like, P_class, P_seqcoh, test_run, multi = 0):
-    """Export the phage sequence reorganized and completed if needed."""
-    if test_run:
-        return ""
-    seq_out = ""
-    # Mu-like
-    if Mu_like:
-        if P_orient == "Forward":
-            if P_right != "Random":
-                if P_left > P_right:
-                    seq_out = refseq[P_right-1:P_left-1]
-                else:
-                    seq_out = refseq[P_right-1:] + refseq[:P_left-1]
-            else:
-                seq_out = refseq[P_left-1:] + refseq[:P_left-1]
-        elif P_orient == "Reverse":
-            if P_left != "Random":
-                if P_left > P_right:
-                    seq_out = reverseComplement(refseq[P_right-1:P_left-1])
-                else:
-                    seq_out = reverseComplement(refseq[P_right-1:] + reverseComplement(refseq[:P_left-1]))
-            else:
-                seq_out = reverseComplement(refseq[P_right-1:] + reverseComplement(refseq[:P_right-1]) )
-    # COS
-    elif isinstance(P_left, np.integer) and isinstance(P_right, np.integer):
-        # Cos or DTR
-        if P_class == "COS (3')":
-            if abs(P_left-P_right) > len(refseq)/2:
-                seq_out = refseq[min(P_left,P_right)-1:max(P_left,P_right)]
-            else:
-                seq_out = refseq[max(P_left,P_right)-1:] + refseq[:min(P_left,P_right)]
-            seq_out = seq_out + P_seqcoh
-        else:
-            # Genome
-            if abs(P_left-P_right) > len(refseq)/2:
-                seq_out = refseq[min(P_left,P_right)-1:max(P_left,P_right)]
-            else:
-                seq_out = refseq[max(P_left,P_right):] + refseq[:min(P_left,P_right)-1]
-            # COS 5'
-            if P_class == "COS (5')":
-                seq_out = P_seqcoh + seq_out
-            # DTR
-            else:
-                seq_out = P_seqcoh + seq_out + P_seqcoh
-    # PAC
-    elif isinstance(P_left, np.integer) or isinstance(P_right, np.integer):
-        if P_orient == "Reverse":
-            seq_out = reverseComplement(refseq[:P_right]) + reverseComplement(refseq[P_right:])
-        else:
-            seq_out = refseq[P_left-1:] + refseq[:P_left-1]
-    # Write Sequence
-    if multi:
-        return ">" + phagename + " sequence re-organized\n" + exportDataSplit(seq_out, 60)
-    else:
-        with open(phagename + "_sequence.fasta", "w") as filout:
-            filout.write(">" + phagename + " sequence re-organized\n" + exportDataSplit(seq_out, 60))
-    return ""
-
-def CreateReport(phagename, seed, added_whole_coverage, draw, Redundant, P_left, P_right, Permuted, P_orient, termini_coverage_norm_close, picMaxPlus_norm_close, picMaxMinus_norm_close, gen_len, tot_reads, P_seqcoh, phage_plus_norm, phage_minus_norm, ArtPackmode, termini, forward, reverse, ArtOrient, ArtcohesiveSeq, termini_coverage_close, picMaxPlus_close, picMaxMinus_close, picOUT_norm_forw, picOUT_norm_rev, picOUT_forw, picOUT_rev, lost_perc, ave_whole_cov, R1, R2, R3, host, host_len, host_whole_coverage, picMaxPlus_host, picMaxMinus_host, surrounding, drop_cov, paired, insert, phage_hybrid_coverage, host_hybrid_coverage, added_paired_whole_coverage, Mu_like, test_run, P_class, P_type, P_concat, multi = 0, multiReport = 0, *args, **kwargs):
-    """Produce a PDF report."""
-    if not multi:
-        doc = SimpleDocTemplate("%s_PhageTerm_report.pdf" % phagename, pagesize=letter, rightMargin=10,leftMargin=10, topMargin=5, bottomMargin=10)
-        report=[]
-    else:
-        report = multiReport
-
-    styles=getSampleStyleSheet()
-    styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))
-    styles.add(ParagraphStyle(name='Center', alignment=TA_CENTER))
-    styles.add(ParagraphStyle(name='Right', alignment=TA_RIGHT))
-    styles.add(ParagraphStyle(name='Left', alignment=TA_LEFT))
-
-    ### GENERAL INFORMATION
-    
-    # TITLE
-    ptext = '<b><font size=16>' + phagename + ' PhageTerm Analysis</font></b>'
-    report.append(Paragraph(ptext, styles["Center"]))
-    report.append(Spacer(1, 15))
-    
-    ## ZOOMED TERMINI GRAPH AND LOGO RESULT
-    
-    # LOGO SELECTION
-    
-    imgdata = io.BytesIO()
-    fig_logo = GraphLogo(P_class, P_left, P_right, draw)
-    fig_logo.savefig(imgdata, format='png')
-    imgdata.seek(0)
-    IMG = ImageReader(imgdata)
-    IMAGE_2 = Image(IMG.fileName, width=150, height=150, kind='proportional')
-    IMAGE_2.hAlign = 'CENTER'
-
-    # Zoom on inter-termini seq
-    if isinstance(P_left, np.integer) and isinstance(P_right, np.integer) and not Mu_like:
-        Zoom_left  = min(P_left-1000, P_right-1000)
-        Zoom_right = max(P_left+1000, P_right+1000)
-        imgdata = io.BytesIO()
-        if P_orient == "Reverse":
-            zoom_pos_left  = P_right-max(0,Zoom_left)
-            zoom_pos_right = P_left-max(0,Zoom_left)
-        else:
-            zoom_pos_left  = P_left-max(0,Zoom_left)
-            zoom_pos_right = P_right-max(0,Zoom_left)
-
-        figZ_whole = GraphWholeCov(added_whole_coverage[max(0,Zoom_left):min(gen_len,Zoom_right)], phagename + "-zoom", draw, P_left, P_right, zoom_pos_left, zoom_pos_right, 1, "Zoom Termini")
-        figZ_whole.savefig(imgdata, format='png')
-        imgdata.seek(0)
-        IMG = ImageReader(imgdata)
-        IMAGE = Image(IMG.fileName, width=275, height=340, kind='proportional')
-        IMAGE.hAlign = 'CENTER'
-
-        data = [[IMAGE, IMAGE_2]]
-        t=Table(data, 1*[4*inch]+1*[3*inch], 1*[2*inch], hAlign='CENTER', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-        report.append(Spacer(1, 5))
-
-    elif isinstance(P_left, np.integer) and P_orient == "Forward":
-        imgdata = io.BytesIO()
-
-        if Mu_like:
-            figZL_whole = GraphWholeCov(phage_hybrid_coverage[0][max(0,P_left-1000):min(gen_len,P_left+1000)], phagename + "-zoom-left", draw, P_left, "", P_left-max(0,P_left-1000), 0, 1, "Zoom Termini")
-        else:
-            figZL_whole = GraphWholeCov(added_whole_coverage[max(0,P_left-1000):min(gen_len,P_left+1000)], phagename + "-zoom-left", draw, P_left, P_right, P_left-max(0,P_left-1000), 0, 1, "Zoom Termini")
-        figZL_whole.savefig(imgdata, format='png')
-        imgdata.seek(0)
-        IMG = ImageReader(imgdata)
-        IMAGE = Image(IMG.fileName, width=275, height=340, kind='proportional')
-        IMAGE.hAlign = 'CENTER'
-    
-        data = [[IMAGE, IMAGE_2]]
-        t=Table(data, 1*[5*inch]+1*[3*inch], 1*[2*inch], hAlign='CENTER', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-
-    elif isinstance(P_right, np.integer) and P_orient == "Reverse":
-        imgdata = io.BytesIO()
-        
-        if Mu_like:
-            figZR_whole = GraphWholeCov(phage_hybrid_coverage[1][max(0,P_right-1000):min(gen_len,P_right+1000)], phagename + "-zoom-right", draw, "", P_right, 0, P_right-max(0,P_right-1000), 1, "Zoom Termini")
-        else:
-            figZR_whole = GraphWholeCov(added_whole_coverage[max(0,P_right-1000):min(gen_len,P_right+1000)], phagename + "-zoom-right", draw, P_left, P_right, 0, P_right-max(0,P_right-1000), 1, "Zoom Termini")
-        figZR_whole.savefig(imgdata, format='png')
-        imgdata.seek(0)
-        IMG = ImageReader(imgdata)
-        IMAGE = Image(IMG.fileName, width=275, height=340, kind='proportional')
-        IMAGE.hAlign = 'CENTER'
-        
-        data = [[IMAGE, IMAGE_2]]
-        t=Table(data, 1*[5*inch]+1*[3*inch], 1*[2*inch], hAlign='CENTER', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-        report.append(Spacer(1, 5))
-    else:
-        data = [[IMAGE_2]]
-        t=Table(data, 1*[1.5*inch], 1*[2*inch], hAlign='CENTER', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-
-    # Warning coverage message
-    if ave_whole_cov < 50 and test_run == 0:
-        ptextawc = "- - - - - - - - - WARNING: Coverage (" + str(int(ave_whole_cov)) + ") is under the limit of the software, Please consider results carrefuly. - - - - - - - - -"
-        data = [[ptextawc]]
-        t=Table(data, 1*[5*inch], hAlign='LEFT', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('TEXTCOLOR',(0,0),(-1,-1),'RED'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-
-    ## Statistics
-    ptext = '<u><font size=14>PhageTerm Method</font></u>'
-    report.append(Paragraph(ptext, styles["Left"]))
-    report.append(Spacer(1, 10))
-    
-    if Redundant:
-        Ends = "Redundant"
-    else:
-        Ends = "Non Red."
-
-    data = [["Ends", "Left (red)", "Right (green)", "Permuted", "Orientation", "Class", "Type"], [Ends, P_left, P_right, Permuted, P_orient, P_class, P_type]]
-    t=Table(data, 7*[1.10*inch], 2*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-    report.append(t)
-    report.append(Spacer(1, 5))
-    
-    # Seq cohesive or Direct terminal repeats
-    if P_seqcoh != "":
-        if len(P_seqcoh) < 20:
-            ptext = '<i><font size=12>*Cohesive sequence: ' + P_seqcoh + '</font></i>'
-        else:
-            ptext = '<i><font size=12>*Direct Terminal Repeats: ' + str(len(P_seqcoh)) + ' bp</font></i>'
-        report.append(Paragraph(ptext, styles["Left"]))
-
-    # Multiple / Multiple (Nextera)
-    if P_left == "Multiple" and P_right == "Multiple":
-        ptext = '<i><font size=12>*These results could be due to a non-randomly fragmented library (e.g. Nextera)</font></i>'
-        report.append(Paragraph(ptext, styles["Left"]))
-    
-
-    # Concatemer
-    elif P_class[:7] == "Headful" and paired != "":
-        ptext = '<i><font size=12>*concatemer estimation: ' + str(P_concat) + '</font></i>'
-        report.append(Paragraph(ptext, styles["Left"]))
-
-    # Mu hybrid
-    elif Mu_like:
-        if P_orient == "Forward":
-            Mu_termini = P_left
-        else:
-            Mu_termini = P_right
-        ptext = '<i><font size=12>*Estimated Mu-like terminus position from hybrid fragments: ' + str(Mu_termini) + '</font></i>'
-        report.append(Paragraph(ptext, styles["Left"]))
-
-    report.append(Spacer(1, 10))
-
-    # Results
-    imgdata = io.BytesIO()
-    figP_norm = GraphCov(termini_coverage_norm_close, picMaxPlus_norm_close[:1], picMaxMinus_norm_close[:1], phagename + "-norm", 1, draw)
-    figP_norm.savefig(imgdata, format='png')
-    imgdata.seek(0)
-    IMG = ImageReader(imgdata)
-    IMAGE = Image(IMG.fileName, width=240, height=340, kind='proportional')
-    IMAGE.hAlign = 'CENTER'
-
-    data = [["Strand", "Location", "T", "pvalue", "T (Start. Pos. Cov. / Whole Cov.)"], ["+",phage_plus_norm["Position"][0],format(phage_plus_norm["SPC"][0]/100.0, '0.2f'),format(phage_plus_norm["pval_gamma_adj"][0], '0.2e'),IMAGE], ["",phage_plus_norm["Position"][1],format(phage_plus_norm["SPC"][1]/100.0, '0.2f'),format(phage_plus_norm["pval_gamma_adj"][1], '0.2e'),""], ["",phage_plus_norm["Position"][2],format(phage_plus_norm["SPC"][2]/100.0, '0.2f'),format(phage_plus_norm["pval_gamma_adj"][2], '0.2e'),""], ["",phage_plus_norm["Position"][3],format(phage_plus_norm["SPC"][3]/100.0, '0.2f'),format(phage_plus_norm["pval_gamma_adj"][3], '0.2e'),""], ["",phage_plus_norm["Position"][4],format(phage_plus_norm["SPC"][4]/100.0, '0.2f'),format(phage_plus_norm["pval_gamma_adj"][4], '0.2e'),""], ["-",phage_minus_norm["Position"][0],format(phage_minus_norm["SPC"][0]/100.0, '0.2f'),format(phage_minus_norm["pval_gamma_adj"][0], '0.2e'),""], ["",phage_minus_norm["Position"][1],format(phage_minus_norm["SPC"][1]/100.0, '0.2f'),format(phage_minus_norm["pval_gamma_adj"][1], '0.2e'),""], ["",phage_minus_norm["Position"][2],format(phage_minus_norm["SPC"][2]/100.0, '0.2f'),format(phage_minus_norm["pval_gamma_adj"][2], '0.2e'),""], ["",phage_minus_norm["Position"][3],format(phage_minus_norm["SPC"][3]/100.0, '0.2f'),format(phage_minus_norm["pval_gamma_adj"][3], '0.2e'),""], ["",phage_minus_norm["Position"][4],format(phage_minus_norm["SPC"][4]/100.0, '0.2f'),format(phage_minus_norm["pval_gamma_adj"][4], '0.2e'),""]]
-    t=Table(data, 4*[1*inch]+1*[4*inch], 11*[0.25*inch], hAlign='CENTER', style=[('SPAN',(0,1),(0,5)), ('SPAN',(0,6),(0,10)), ('SPAN',(4,1),(4,10)), ('LINEABOVE',(0,1),(4,1),1.5,colors.black), ('LINEABOVE',(0,6),(4,6),1.5,colors.grey), ('FONT',(0,0),(-1,0),'Helvetica-Bold'), ('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),12), ('FONTSIZE',(0,1),(0,-1),16), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-
-    report.append(t)
-    report.append(Spacer(1, 5))
-
-    ## Li's Analysis
-    ptext = '<u><font size=14>Li\'s Method</font></u>'
-    report.append(Paragraph(ptext, styles["Left"]))
-    report.append(Spacer(1, 10))
-
-    data = [["Packaging", "Termini", "Forward", "Reverse", "Orientation"], [ArtPackmode, termini, forward, reverse, ArtOrient]]
-    t=Table(data, 2*[1*inch] + 2*[2*inch] + 1*[1*inch], 2*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-
-    report.append(t)
-    report.append(Spacer(1, 5))
-
-    # Seq cohesive or Direct terminal repeats
-    if len(ArtcohesiveSeq) > 2:
-        if len(ArtcohesiveSeq) < 20:
-            ptext = '<i><font size=12>*Cohesive sequence: ' + ArtcohesiveSeq + '</font></i>'
-        else:
-            ptext = '<i><font size=12>*Direct Terminal Repeats: ' + str(len(ArtcohesiveSeq)) + ' bp</font></i>'
-        report.append(Paragraph(ptext, styles["Left"]))
-    report.append(Spacer(1, 10))
-
-    # Results
-    imgdata = io.BytesIO()
-    figP = GraphCov(termini_coverage_close, picMaxPlus_close[:1], picMaxMinus_close[:1], phagename, 0, draw)
-    figP.savefig(imgdata, format='png')
-    imgdata.seek(0)
-    IMG = ImageReader(imgdata)
-    IMAGE = Image(IMG.fileName, width=240, height=340, kind='proportional')
-    IMAGE.hAlign = 'CENTER'
-    
-    data = [["Strand", "Location", "SPC", "R", "SPC"],["+",picMaxPlus_close[0][1]+1,picMaxPlus_close[0][0],R2,IMAGE],["",picMaxPlus_close[1][1]+1,picMaxPlus_close[1][0],"-",""],["",picMaxPlus_close[2][1]+1,picMaxPlus_close[2][0],"-",""],["",picMaxPlus_close[3][1]+1,picMaxPlus_close[3][0],"-",""],["",picMaxPlus_close[4][1]+1,picMaxPlus_close[4][0],"-",""],["-",picMaxMinus_close[0][1]+1,picMaxMinus_close[0][0],R3,""], ["",picMaxMinus_close[1][1]+1,picMaxMinus_close[1][0],"-",""], ["",picMaxMinus_close[2][1]+1,picMaxMinus_close[2][0],"-",""], ["",picMaxMinus_close[3][1]+1,picMaxMinus_close[3][0],"-",""], ["",picMaxMinus_close[4][1]+1,picMaxMinus_close[4][0],"-",""]]
-    t=Table(data, 4*[1*inch]+1*[4*inch], 11*[0.25*inch], hAlign='CENTER', style=[('SPAN',(0,1),(0,5)), ('SPAN',(0,6),(0,10)), ('SPAN',(4,1),(4,10)), ('LINEABOVE',(0,1),(4,1),1.5,colors.black), ('LINEABOVE',(0,6),(4,6),1.5,colors.grey), ('FONT',(0,0),(-1,0),'Helvetica-Bold'), ('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),12), ('FONTSIZE',(0,1),(0,-1),16), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-    
-    report.append(t)
-
-
-    # NEW PAGE
-    report.append(PageBreak())
-
-    # HOST RESULTS
-    if host != "":
-        # Host coverage
-        ptext = '<u><font size=14>Host Analysis</font></u>'
-        report.append(Paragraph(ptext, styles["Left"]))
-        report.append(Spacer(1, 10))
-
-        ptext = '<i><font size=10>Reads that do not match the phage genome are tested against the host genome. These reads could come from phage transduction, but also from host DNA contamination.</font></i>'
-        report.append(Paragraph(ptext, styles["Justify"]))
-        report.append(Spacer(1, 5))
-        
-        data = [["Host Genome", str(host_len) + " bp"]]
-        t=Table(data, 2*[2.25*inch], hAlign='LEFT', style=[('FONT',(0,0),(0,0),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'LEFT') ,('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-
-        report.append(t)
-        report.append(Spacer(1, 5))
-
-        imgdata = io.BytesIO()
-
-        figH = GraphCov(host_whole_coverage, picMaxPlus_host[:1], picMaxMinus_host[:1], "", 0, draw)
-        figH.savefig(imgdata, format='png')
-        imgdata.seek(0)
-        IMG = ImageReader(imgdata)
-        IMAGE = Image(IMG.fileName, width=240, height=340, kind='proportional')
-        IMAGE.hAlign = 'CENTER'
-
-        data = [["Strand", "Location", "Coverage", "-", "Whole Coverage"],["+",picMaxPlus_host[0][1]+1,picMaxPlus_host[0][0],"-",IMAGE],["",picMaxPlus_host[1][1]+1,picMaxPlus_host[1][0],"-",""],["",picMaxPlus_host[2][1]+1,picMaxPlus_host[2][0],"-",""],["",picMaxPlus_host[3][1]+1,picMaxPlus_host[3][0],"-",""],["",picMaxPlus_host[4][1]+1,picMaxPlus_host[4][0],"-",""],["-",picMaxMinus_host[0][1]+1,picMaxMinus_host[0][0],"-",""], ["",picMaxMinus_host[1][1]+1,picMaxMinus_host[1][0],"-",""], ["",picMaxMinus_host[2][1]+1,picMaxMinus_host[2][0],"-",""], ["",picMaxMinus_host[3][1]+1,picMaxMinus_host[3][0],"-",""], ["",picMaxMinus_host[4][1]+1,picMaxMinus_host[4][0],"-",""]]
-        t=Table(data, 4*[1*inch]+1*[4*inch], 11*[0.25*inch], hAlign='CENTER', style=[('SPAN',(0,1),(0,5)), ('SPAN',(0,6),(0,10)), ('SPAN',(4,1),(4,10)), ('LINEABOVE',(0,1),(4,1),1.5,colors.black), ('LINEABOVE',(0,6),(4,6),1.5,colors.grey), ('FONT',(0,0),(-1,0),'Helvetica-Bold'), ('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),12), ('FONTSIZE',(0,1),(0,-1),16), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-
-        report.append(t)
-        report.append(Spacer(1, 10))
-
-        # Hybrid coverage
-        ptext = '<u><font size=14>Hybrid Analysis</font></u>'
-        report.append(Paragraph(ptext, styles["Left"]))
-        report.append(Spacer(1, 10))
-
-        ptext = '<i><font size=10>Hybrid reads, with one edge on the phage genome and the other on the host genome, are detected. Phage hybrid coverage is used to detect the Mu-like packaging mode. Host hybrid coverage can be used to detect phage transduction, as well as the genome-integration sites of prophages.</font></i>'
-        report.append(Paragraph(ptext, styles["Justify"]))
-        report.append(Spacer(1, 5))
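-        ## Illustrative sketch (kept commented; names are hypothetical): a read
-        ## pair counts as hybrid when its two mates map to different genomes.
-        # def is_hybrid_pair(mate1_ref, mate2_ref):
-        #     """True when one mate maps to the phage and the other to the host."""
-        #     return {mate1_ref, mate2_ref} == {"phage", "host"}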
-
-        picMaxPlus_phage_hybrid, picMaxMinus_phage_hybrid, TopFreqH_phage_hybrid = picMax(phage_hybrid_coverage, 5)
-        picMaxPlus_host_hybrid, picMaxMinus_host_hybrid, TopFreqH_host_hybrid    = picMax(host_hybrid_coverage, 5)
-
-        imgdataPH      = io.BytesIO()
-        figPH          = GraphCov(phage_hybrid_coverage, picMaxPlus_phage_hybrid[:1], picMaxMinus_phage_hybrid[:1], "", 0, draw, 1)
-        figPH.savefig(imgdataPH, format='png')
-        imgdataPH.seek(0)
-        IMGPH          = ImageReader(imgdataPH)
-        IMAGEPH        = Image(IMGPH.fileName, width=240, height=340, kind='proportional')
-        IMAGEPH.hAlign = 'CENTER'
-
-
-        imgdataHH      = io.BytesIO()
-        figHH          = GraphCov(host_hybrid_coverage, picMaxPlus_host_hybrid[:1], picMaxMinus_host_hybrid[:1], "", 0, draw, 1)
-        figHH.savefig(imgdataHH, format='png')
-        imgdataHH.seek(0)
-        IMGHH          = ImageReader(imgdataHH)
-        IMAGEHH        = Image(IMGHH.fileName, width=240, height=340, kind='proportional')
-        IMAGEHH.hAlign = 'CENTER'
-
-        data = [["Phage Hybrid Coverage", "Host Hybrid Coverage"],[IMAGEPH,IMAGEHH]]
-        t=Table(data, 2*[4*inch], 1*[0.25*inch]+1*[2.5*inch], hAlign='CENTER', style=[('LINEABOVE',(0,1),(1,1),1.5,colors.black),('FONT',(0,0),(-1,-1),'Helvetica-Bold'),('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-
-        report.append(t)
-        report.append(Spacer(1, 10))
-
-        # NEW PAGE
-        report.append(PageBreak())
-
-
-    # DETAILED RESULTS
-    ptext = '<u><font size=14>Analysis Methodology</font></u>'
-    report.append(Paragraph(ptext, styles["Left"]))
-    report.append(Spacer(1, 10))
-    
-    ptext = '<i><font size=10>PhageTerm uses raw reads of a phage sequenced with a technology based on random fragmentation, together with the phage reference genome, to determine termini positions. The process starts by aligning the NGS reads to the phage genome to compute the starting position coverage (SPC), where a hit is assigned only to the position of the first base of a successfully aligned read (the alignment uses a seed of fixed length (default: 20) and accepts neither gaps nor mismatches, to speed up the process). The program then applies two distinct scoring methods: i) a statistical approach based on the Gamma distribution; and ii) a method derived from Li et al. 2014.</font></i>'
-    report.append(Paragraph(ptext, styles["Justify"]))
-    report.append(Spacer(1, 5))
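-    ## Illustrative sketch (kept commented; `read_starts` and `gen_len` are
-    ## hypothetical inputs): how SPC follows from mapped read start positions.
-    ## The real computation happens upstream, in readsCoverage.
-    # def starting_position_coverage(read_starts, gen_len):
-    #     """Count, per genome position, the reads whose first base maps there."""
-    #     spc = np.zeros(gen_len, dtype=int)
-    #     for start in read_starts:   # 0-based position of a read's first base
-    #         spc[start] += 1
-    #     return spc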
-
-    
-    # INFORMATION
-    ptext = '<u><font size=12>General set-up and mapping information</font></u>'
-    report.append(Paragraph(ptext, styles["Justify"]))
-    report.append(Spacer(1, 5))
-    
-
-    imgdata = io.BytesIO()
-    
-    if paired != "":
-        figP_whole = GraphWholeCov(added_paired_whole_coverage, phagename, draw)
-    else:
-        figP_whole = GraphWholeCov(added_whole_coverage, phagename, draw)
-    figP_whole.savefig(imgdata, format='png')
-    imgdata.seek(0)
-    IMG            = ImageReader(imgdata)
-    IMAGE          = Image(IMG.fileName, width=275, height=340, kind='proportional')
-    IMAGE.hAlign   = 'CENTER'
-    
-    if host == "":
-        host_analysis = "No"
-    else:
-        host_analysis = "Yes"
-
-    if paired == "":
-        sequencing_reads = "Single-ends Reads"
-    else:
-        sequencing_reads = "Paired-ends Reads"
-    
-    data = [["Phage Genome ", str(gen_len) + " bp",IMAGE], ["Sequencing Reads", int(tot_reads),""], ["Mapping Reads", str(int(100 - lost_perc)) + " %",""], ["OPTIONS","",""], ["Mapping Seed",seed,""], ["Surrounding",surrounding,""], ["Host Analysis ", host_analysis,""], ["","",""]]
-    t=Table(data, 1*[2.25*inch]+1*[1.80*inch]+1*[4*inch], 8*[0.25*inch], hAlign='LEFT', style=[('SPAN',(2,0),(2,-1)), ('FONT',(0,0),(0,2),'Helvetica-Bold'), ('FONT',(0,3),(0,3),'Helvetica-Oblique'), ('FONT',(0,4),(1,-1),'Helvetica-Oblique'), ('FONT',(2,0),(2,0),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-2),'LEFT'), ('ALIGN',(2,0),(2,-1),'CENTER') ,('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-    
-    report.append(t)
-    report.append(Spacer(1, 5))
-
-
-    # Image of the highest peak on each side, even if not significant
-    ptext = '<u><font size=12>Highest peak on each side: coverage graphics</font></u>'
-    report.append(Paragraph(ptext, styles["Justify"]))
-    report.append(Spacer(1, 5))
-
-
-    imgdata = io.BytesIO()
-
-    if Mu_like and isinstance(P_left, np.integer):
-        figHL_whole = GraphWholeCov(phage_hybrid_coverage[0][max(0,P_left-1000):min(gen_len,P_left+1000)], phagename + "-zoom-left", draw, P_left, "", P_left-max(0,P_left-1000), 0, 1, "Zoom Termini")
-    else:
-        P_left  = phage_plus_norm["Position"][0]
-        figHL_whole = GraphWholeCov(added_whole_coverage[max(0,P_left-1000):min(gen_len,P_left+1000)], phagename + "-zoom-left", draw, P_left, "", P_left-max(0,P_left-1000), 0, 1, "Zoom Termini")
-    figHL_whole.savefig(imgdata, format='png')
-    imgdata.seek(0)
-    IMG = ImageReader(imgdata)
-    IMAGE = Image(IMG.fileName, width=275, height=340, kind='proportional')
-    IMAGE.hAlign = 'CENTER'
-
-    imgdata2 = io.BytesIO()
-
-    if Mu_like and isinstance(P_right, np.integer):
-        figHR_whole = GraphWholeCov(phage_hybrid_coverage[1][max(0,P_right-1000):min(gen_len,P_right+1000)], phagename + "-zoom-right", draw, "", P_right, 0, P_right-max(0,P_right-1000), 1, "Zoom Termini")
-    else:
-        P_right = phage_minus_norm["Position"][0]
-        figHR_whole = GraphWholeCov(added_whole_coverage[max(0,P_right-1000):min(gen_len,P_right+1000)], phagename + "-zoom-right", draw, "", P_right, 0, P_right-max(0,P_right-1000), 1, "Zoom Termini")
-    figHR_whole.savefig(imgdata2, format='png')
-    imgdata2.seek(0)
-    IMG2 = ImageReader(imgdata2)
-    IMAGE2 = Image(IMG2.fileName, width=275, height=340, kind='proportional')
-    IMAGE2.hAlign = 'CENTER'
-
-    if Mu_like:
-        data = [["Hybrid Coverage Zoom (Left)", "Hybrid Coverage Zoom (Right)"],[IMAGE,IMAGE2]]
-    else:
-        data = [["Whole Coverage Zoom (Left)", "Whole Coverage Zoom (Right)"],[IMAGE,IMAGE2]]
-    t=Table(data, 2*[4*inch], 1*[0.25*inch]+1*[2*inch], hAlign='CENTER', style=[('LINEABOVE',(0,1),(1,1),1.5,colors.black),('FONT',(0,0),(-1,-1),'Helvetica-Bold'),('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-    report.append(t)
-
-    # Controls
-    ptext = '<u><font size=12>General controls information</font></u>'
-    report.append(Paragraph(ptext, styles["Justify"]))
-    report.append(Spacer(1, 5))
-
-    if ave_whole_cov < 50:
-        ptextawc = "WARNING: Under the limit of the software (50)"
-    elif ave_whole_cov < 200:
-        ptextawc = "WARNING: Low (<200), Li's method could not be reliable"
-    else:
-        ptextawc = "OK"
-
-    data = [["Whole genome coverage", int(ave_whole_cov), ptextawc]]
-    t=Table(data, 1*[3.5*inch]+1*[1*inch]+1*[3.5*inch], hAlign='LEFT', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-    report.append(t)
-
-    # Express as a percentage so the "%.1f %%" display below reads correctly
-    drop_perc = 100.0 * len([i for i in added_whole_coverage if i < (ave_whole_cov/2)]) / float(len(added_whole_coverage))
-    if drop_perc < 1:
-        ptextdp = "OK"
-    else:
-        ptextdp = "Check your genome reference"
-
-    data = [["Weak genome coverage", "%.1f %%" %drop_perc, ptextdp]]
-    t=Table(data, 1*[3.5*inch]+1*[1*inch]+1*[4*inch], hAlign='LEFT', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-    report.append(t)
-
-    if paired != "":
-        if len(insert) != 0:
-            insert_mean = sum(insert)/len(insert)
-        else:
-            insert_mean = "-"
-        data = [["Insert mean size", insert_mean, "Mean insert estimated from paired-end reads"]]
-        t=Table(data, 1*[3.5*inch]+1*[1*inch]+1*[4*inch], hAlign='LEFT', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-
-    if lost_perc > 25:
-        ptextlp = "Warning: high percentage of reads lost"
-    else:
-        ptextlp = "OK"
-
-    data = [["Reads lost during alignment", "%.1f %%" %lost_perc, ptextlp]]
-    t=Table(data, 1*[3.5*inch]+1*[1*inch]+1*[4*inch], hAlign='LEFT', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-    report.append(t)
-    report.append(Spacer(1, 5))
-
-    # DETAILED SCORE
-    ptext = '<b><font size=14>i) PhageTerm method</font></b>'
-    report.append(Paragraph(ptext, styles["Left"]))
-    report.append(Spacer(1, 10))
-
-    ptext = '<i><font size=10>Reads are mapped on the reference to determine the starting position coverage (SPC) as well as the coverage (COV) in each orientation. These values are then used to compute the variable T = SPC/COV. At positions along the genome that are not termini, the average value of T is expected to be 1/F, where F is the average fragment size. At the termini, the expected value depends on the packaging mode. COS phages: no read should start before the terminus, therefore T=1. DTR phages: for N phages present in the sample, there should be N fragments that start at the terminus and N fragments that cover the edge of the repeat on the other side of the genome; as a result, T is expected to be 0.5. Pac phages: for N phages in the sample, there should be N/C fragments starting at the pac site, where C is the number of phage genome copies per concatemer; in the same sample, N fragments should cover the pac site position, so T is expected to be (N/C)/(N+N/C) = 1/(1+C). To assess whether the number of reads starting at a given position can be considered a significant outlier, PhageTerm first segments the genome according to coverage using a regression tree. A Gamma distribution is fitted to the SPC of each segment and an adjusted p-value is computed for each position. If several significant peaks are detected within a small sequence window (default: 20 bp), they are merged.</font></i>'
-    report.append(Paragraph(ptext, styles["Justify"]))
-    report.append(Spacer(1, 5))
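-    ## Illustrative sketch (kept commented; an assumption mirroring the text
-    ## above): the expected T = SPC/COV at a terminus for each packaging mode.
-    # def expected_T(mode, C=3):
-    #     """COS: T=1 (no read starts before the cut); DTR: 0.5 (N starting vs
-    #     2N covering fragments); PAC: 1/(1+C), C genome copies per concatemer."""
-    #     if mode == "COS":
-    #         return 1.0
-    #     if mode == "DTR":
-    #         return 0.5
-    #     if mode == "PAC":
-    #         return 1.0 / (1 + C)
-    #     raise ValueError(mode)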
-
-    # surrounding
-    if surrounding > 0:
-        data = [["Nearby Termini (Forward / Reverse)", str(len(picOUT_norm_forw)-1) + " / " + str(len(picOUT_norm_rev)-1), "Peaks localized %s bases around the maximum" %surrounding]]
-        t=Table(data, 1*[3.5*inch]+1*[1*inch]+1*[4*inch], 1*[0.25*inch], hAlign='LEFT', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-
-    report.append(Spacer(1, 10))
-
-    # Li's Method
-    if not multi:
-        ptext = '<b><font size=14>ii) Li\'s method</font></b>'
-        report.append(Paragraph(ptext, styles["Left"]))
-        report.append(Spacer(1, 10))
-        
-        ptext = '<i><font size=10>The second approach is based on the calculation and interpretation of three specific ratios, R1, R2 and R3, as suggested by Li et al. 2014. The first ratio is calculated as follows: the highest starting frequency found on either the forward or reverse strand is divided by the average starting frequency, R1 = (highest frequency/average frequency). Li et al. proposed three possible interpretations of the R1 ratio. First, if R1 < 30, the phage genome does not have any termini and is either circular or completely permuted and terminally redundant. Second, if 30 ≤ R1 ≤ 100, preferred termini are present, with terminal redundancy and the appearance of partial circular permutation. Last, if R1 > 100, at least one fixed terminus is present, with the terminase recognizing a specific site. The two other ratios, R2 and R3, are calculated in a similar manner: R2 uses the two highest frequencies (T1-F and T2-F) found on the forward strand, and R3 the two highest frequencies (T1-R and T2-R) found on the reverse strand. In each case the highest frequency is divided by the second highest, so R2 = (T1-F / T2-F) and R3 = (T1-R / T2-R). These two ratios are used to analyze the termini characteristics of each strand taken individually. Li et al. suggested two possible interpretations for the R2 and R3 ratios combined with R1. When R1 < 30 and R2 < 3, there is either no obvious terminus on the forward strand, or there are multiple preferred termini on the forward strand if 30 ≤ R1 ≤ 100. If R2 > 3, a unique obvious terminus is present on the forward strand. The same reasoning applies to R3. Combining the ratios found with this approach, a first prediction of the viral packaging mode can be made: a unique obvious terminus at both ends (both R2 and R3 > 3) reveals a COS packaging mode, while the headful packaging mode (PAC) is concluded when a single obvious terminus is found on only one strand.</font></i>'
-        report.append(Paragraph(ptext, styles["Justify"]))
-        report.append(Spacer(1, 5))
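-        ## Illustrative sketch (kept commented; `spc_forw`/`spc_rev` are
-        ## hypothetical per-position starting-frequency arrays): how R1, R2 and
-        ## R3 follow from the definitions above (needs two non-zero peaks per strand).
-        # def li_ratios(spc_forw, spc_rev):
-        #     spc_all = np.concatenate([spc_forw, spc_rev])
-        #     R1 = spc_all.max() / spc_all.mean()        # highest / average frequency
-        #     t1f, t2f = np.sort(spc_forw)[-2:][::-1]    # two highest, forward strand
-        #     t1r, t2r = np.sort(spc_rev)[-2:][::-1]     # two highest, reverse strand
-        #     return R1, t1f / t2f, t1r / t2r            # R1, R2, R3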
-        
-        if surrounding > 0:
-            data = [["Nearby Termini (Forward / Reverse)", str(len(picOUT_forw)-1) + " / " + str(len(picOUT_rev)-1), "Peaks localized %s bases around the maximum" %surrounding]]
-            t=Table(data, 1*[3.5*inch]+1*[1*inch]+1*[3.5*inch], 1*[0.25*inch], hAlign='LEFT', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-            report.append(t)
-            report.append(Spacer(1, 5))
-
-        if R1 > 100:
-            ptextR1 = "At least one fixed terminus is present, with the terminase recognizing a specific site."
-        elif R1 > 30:
-            ptextR1 = "Preferred termini are present, with terminal redundancy and the appearance of partial circular permutation."
-        else:
-            ptextR1 = "The phage genome does not have any termini and is either circular or completely permuted and terminally redundant."
-        
-        data = [["R1 - highest freq./average freq.", int(R1), Paragraph(ptextR1, styles["Justify"])]]
-        t=Table(data, 1*[3.5*inch]+1*[1*inch]+1*[3.5*inch], 1*[0.25*inch], hAlign='LEFT', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-        report.append(Spacer(1, 5))
-        
-        if R2 < 3 and R1 < 30:
-            ptextR2 = "No obvious terminus on the forward strand."
-        elif R2 < 3:
-            ptextR2 = "Multiple preferred termini on the forward strand."
-        else:
-            ptextR2 = "Unique terminus on the forward strand."
-        
-        data = [["R2 Forw - highest freq./second freq.", int(R2), Paragraph(ptextR2, styles["Justify"])]]
-        t=Table(data, 1*[3.5*inch]+1*[1*inch]+1*[3.5*inch], 1*[0.25*inch], hAlign='LEFT', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-        report.append(Spacer(1, 5))
-        
-        if R3 < 3 and R1 < 30:
-            ptextR3 = "No obvious terminus on the reverse strand."
-        elif R3 < 3:
-            ptextR3 = "Multiple preferred termini on the reverse strand."
-        else:
-            ptextR3 = "Unique terminus on the reverse strand."
-        
-        data = [["R3 Rev - highest freq./second freq.", int(R3), Paragraph(ptextR3, styles["Justify"])]]
-        t=Table(data, 1*[3.5*inch]+1*[1*inch]+1*[3.5*inch], 1*[0.25*inch], hAlign='LEFT', style=[('FONT',(0,0),(0,-1),'Helvetica-Bold'), ('FONTSIZE',(0,0),(-1,-1),10), ('ALIGN',(0,0),(-1,-1),'LEFT'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-
-    # CREDITS and TIME
-    ptext = '<font size=8>%s</font>' % "Please cite: Sci. Rep. DOI 10.1038/s41598-017-07910-5"
-    report.append(Paragraph(ptext, styles["Center"]))
-    ptext = '<font size=8>%s</font>' % "Garneau, Depardieu, Fortier, Bikard and Monot. PhageTerm: Determining Bacteriophage Termini and Packaging using NGS data."
-    report.append(Paragraph(ptext, styles["Center"]))
-    ptext = '<font size=8>Report generated : %s</font>' % time.ctime()
-    report.append(Paragraph(ptext, styles["Center"]))
-
-    # CREATE PDF
-    if not multi:
-        doc.build(report)
-    else:
-        report.append(PageBreak())
-        return report
-    return
-
-def SummaryReport(phagename, DR, no_match):
-    """ Create first page of multi reports."""
-    report=[]
-    styles=getSampleStyleSheet()
-    styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))
-    styles.add(ParagraphStyle(name='Center', alignment=TA_CENTER)) 
-    styles.add(ParagraphStyle(name='Right', alignment=TA_RIGHT))
-    styles.add(ParagraphStyle(name='Left', alignment=TA_LEFT))
-    
-    ### GENERAL INFORMATION
-    
-    # TITLE
-    ptext = '<b><font size=16>' + phagename + ' PhageTerm Analysis</font></b>'
-    report.append(Paragraph(ptext, styles["Center"]))
-    report.append(Spacer(1, 15))
-    
-    # No Match
-    if len(no_match) > 0:
-        ptext = '<u><font size=14>No Match ('+ str(len(no_match)) +')</font></u>'
-        report.append(Paragraph(ptext, styles["Left"]))
-        report.append(Spacer(1, 10))
-
-        data = [["Name", "Class", "Left", "Right", "Type", "Orient", "Coverage", "Comments"]]
-        t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-1),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-        
-        for contig in no_match:
-            P_comments = "No read match"
-            
-            data = [[contig, "-", "-", "-", "-", "-", 0, P_comments]]
-            t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-            report.append(t)
-
-    # COS Phages
-    count_COS = len(DR["COS (3')"]) + len(DR["COS (5')"]) + len(DR["COS"])
-    ptext = '<u><font size=14>COS Phages ('+ str(count_COS) +')</font></u>'
-    report.append(Paragraph(ptext, styles["Left"]))
-    report.append(Spacer(1, 10))
-    
-    if count_COS != 0:
-        
-        data = [["Name", "Class", "Left", "Right", "Type", "Orient", "Coverage", "Comments"]]
-        t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-1),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-
-        for DC in DR["COS (3')"]:
-            P_comments = ""
-            if int(DR["COS (3')"][DC]["ave_whole_cov"]) < 50:
-                P_comments = "Low coverage"
-        
-            data = [[DC, DR["COS (3')"][DC]["P_class"], DR["COS (3')"][DC]["P_left"], DR["COS (3')"][DC]["P_right"], DR["COS (3')"][DC]["P_type"], DR["COS (3')"][DC]["P_orient"], int(DR["COS (3')"][DC]["ave_whole_cov"]), P_comments]]
-            t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-            report.append(t)
-        
-        for DC in DR["COS (5')"]:
-            P_comments = ""
-            if int(DR["COS (5')"][DC]["ave_whole_cov"]) < 50:
-                P_comments = "Low coverage"
-
-            data = [[DC, DR["COS (5')"][DC]["P_class"], DR["COS (5')"][DC]["P_left"], DR["COS (5')"][DC]["P_right"], DR["COS (5')"][DC]["P_type"], DR["COS (5')"][DC]["P_orient"], int(DR["COS (5')"][DC]["ave_whole_cov"]), P_comments]]
-            t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-            report.append(t)
-
-        for DC in DR["COS"]:
-            P_comments = ""
-            if int(DR["COS"][DC]["ave_whole_cov"]) < 50:
-                P_comments = "Low coverage"
-
-            data = [[DC, DR["COS"][DC]["P_class"], DR["COS"][DC]["P_left"], DR["COS"][DC]["P_right"], DR["COS"][DC]["P_type"], DR["COS"][DC]["P_orient"], int(DR["COS"][DC]["ave_whole_cov"]), P_comments]]
-            t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-            report.append(t)
-
-        report.append(Spacer(1, 5))
-
-    # DTR Phages
-    count_DTR = len(DR["DTR (short)"]) + len(DR["DTR (long)"])
-    ptext = '<u><font size=14>DTR Phages ('+ str(count_DTR) +')</font></u>'
-    report.append(Paragraph(ptext, styles["Left"]))
-    report.append(Spacer(1, 10))
-
-    if count_DTR != 0:
-
-        data = [["Name", "Class", "Left", "Right", "Type", "Orient", "Coverage", "Comments"]]
-        t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-1),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-        
-        for DC in DR["DTR (short)"]:
-            P_comments = ""
-            if int(DR["DTR (short)"][DC]["ave_whole_cov"]) < 50:
-                P_comments = "Low coverage"
-            
-            data = [[DC, DR["DTR (short)"][DC]["P_class"], DR["DTR (short)"][DC]["P_left"], DR["DTR (short)"][DC]["P_right"], DR["DTR (short)"][DC]["P_type"], DR["DTR (short)"][DC]["P_orient"], int(DR["DTR (short)"][DC]["ave_whole_cov"]), P_comments]]
-            t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-            report.append(t)
-
-        for DC in DR["DTR (long)"]:
-            P_comments = ""
-            if int(DR["DTR (long)"][DC]["ave_whole_cov"]) < 50:
-                P_comments = "Low coverage"
-            
-            data = [[DC, DR["DTR (long)"][DC]["P_class"], DR["DTR (long)"][DC]["P_left"], DR["DTR (long)"][DC]["P_right"], DR["DTR (long)"][DC]["P_type"], DR["DTR (long)"][DC]["P_orient"], int(DR["DTR (long)"][DC]["ave_whole_cov"]), P_comments]]
-            t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-            report.append(t)
-
-        report.append(Spacer(1, 5))
-
-    # Headful Phages
-    count_Headful = len(DR["Headful (pac)"])
-    ptext = '<u><font size=14>Headful Phages ('+ str(count_Headful) +')</font></u>'
-    report.append(Paragraph(ptext, styles["Left"]))
-    report.append(Spacer(1, 10))
-
-    if count_Headful != 0:
-
-        data = [["Name", "Class", "Left", "Right", "Type", "Orient", "Coverage", "Comments"]]
-        t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-1),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-        
-        for DC in DR["Headful (pac)"]:
-            P_comments = ""
-            if int(DR["Headful (pac)"][DC]["ave_whole_cov"]) < 50:
-                P_comments = "Low coverage"
-            
-            data = [[DC, DR["Headful (pac)"][DC]["P_class"], DR["Headful (pac)"][DC]["P_left"], DR["Headful (pac)"][DC]["P_right"], DR["Headful (pac)"][DC]["P_type"], DR["Headful (pac)"][DC]["P_orient"], int(DR["Headful (pac)"][DC]["ave_whole_cov"]), P_comments]]
-            t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-            report.append(t)
-
-        report.append(Spacer(1, 5))
-
-    # Other Phages
-    count_Others = len(DR["Mu-like"]) + len(DR["UNKNOWN"]) + len(DR["NEW"])
-    ptext = '<u><font size=14>Other Phages ('+ str(count_Others) +')</font></u>'
-    report.append(Paragraph(ptext, styles["Left"]))
-    report.append(Spacer(1, 10))
-
-    if count_Others != 0:
-
-        data = [["Name", "Class", "Left", "Right", "Type", "Orient", "Coverage", "Comments"]]
-        t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-1),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-        
-        for DC in DR["Mu-like"]:
-            P_comments = ""
-            if int(DR["Mu-like"][DC]["ave_whole_cov"]) < 50:
-                P_comments = "Low coverage"
-            
-            data = [[DC, DR["Mu-like"][DC]["P_class"], DR["Mu-like"][DC]["P_left"], DR["Mu-like"][DC]["P_right"], DR["Mu-like"][DC]["P_type"], DR["Mu-like"][DC]["P_orient"], int(DR["Mu-like"][DC]["ave_whole_cov"]), P_comments]]
-            t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-            report.append(t)
-
-        for DC in DR["NEW"]:
-            P_comments = ""
-            if int(DR["NEW"][DC]["ave_whole_cov"]) < 50:
-                P_comments = "Low coverage"
-            
-            data = [[DC, DR["NEW"][DC]["P_class"], DR["NEW"][DC]["P_left"], DR["NEW"][DC]["P_right"], DR["NEW"][DC]["P_type"], DR["NEW"][DC]["P_orient"], int(DR["NEW"][DC]["ave_whole_cov"]), P_comments]]
-            t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-            report.append(t)
-
-        for DC in DR["UNKNOWN"]:
-            P_comments = ""
-            if int(DR["UNKNOWN"][DC]["ave_whole_cov"]) < 50:
-                P_comments = "Low coverage"
-
-            data = [[DC, DR["UNKNOWN"][DC]["P_class"], DR["UNKNOWN"][DC]["P_left"], DR["UNKNOWN"][DC]["P_right"], DR["UNKNOWN"][DC]["P_type"], DR["UNKNOWN"][DC]["P_orient"], int(DR["UNKNOWN"][DC]["ave_whole_cov"]), P_comments]]
-            t=Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER', style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey), ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'),('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-            report.append(t)
-
-        report.append(Spacer(1, 5))
-
-    report.append(PageBreak())
-
-    return report
-
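-# Editorial sketch (hypothetical helper, not part of the original file): the
-# per-class loops above all repeat the same row-building logic. Assuming only
-# the DR[...][DC] keys already used above ("P_class", "P_left", "P_right",
-# "P_type", "P_orient", "ave_whole_cov") and the same reportlab imports, they
-# could share a single function such as:
-def appendClassRows(report, DR, class_name):
-    """Append one table row per sequence of the given packaging class."""
-    for DC in DR[class_name]:
-        d = DR[class_name][DC]
-        cov = int(d["ave_whole_cov"])
-        P_comments = "Low coverage" if cov < 50 else ""
-        data = [[DC, d["P_class"], d["P_left"], d["P_right"], d["P_type"], d["P_orient"], cov, P_comments]]
-        # Same geometry and style as the hand-written tables above.
-        t = Table(data, 2*[1.50*inch]+5*[0.80*inch]+1*[1.25*inch], 1*[0.25*inch], hAlign='CENTER',
-                  style=[('FONT',(0,0),(-1,-2),'Helvetica-Bold'), ('GRID',(0,0),(-1,-1),0.5,colors.grey),
-                         ('FONTSIZE',(0,0),(-1,-1),12), ('ALIGN',(0,0),(-1,-1),'CENTER'), ('VALIGN',(0,0),(-1,-1),'MIDDLE')])
-        report.append(t)
-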
-def WorkflowReport(phagename, P_class, P_left, P_right, P_type, P_orient, ave_whole_cov, multi = 0, phage_plus_norm=None, phage_minus_norm=None,*args, **kwargs):
-    """ Text report for each phage."""
-
-    P_comments = ""
-    if ave_whole_cov < 50:
-        P_comments = "WARNING: Low coverage"
-    
-    if ave_whole_cov == 0:
-        P_comments = "No read match"
-
-    if not multi:
-        filoutWorkflow = open(phagename + "_workflow.txt", "w")
-        filoutWorkflow.write("#phagename\tClass\tLeft\tRight\tType\tOrient\tCoverage\tComments\n")
-        filoutWorkflow.write(phagename + "\t" + P_class + "\t" + str(P_left) + "\t" + str(P_right) + "\t" + P_type + "\t" + P_orient + "\t" + str(ave_whole_cov) + "\t" + P_comments + "\n")
-        filoutWorkflow.close()
-    else:
-        pval_left_peak="-"
-        pval_adj_left_peak="-"
-        pval_right_peak="-"
-        pval_adj_right_peak="-"
-        if isinstance(P_left,np.int64):
-            # get pvalue and adjusted pvalue for this + peak
-            left_peak_infos=phage_plus_norm.loc[phage_plus_norm['Position']==P_left]
-            pval_left_peak=left_peak_infos["pval_gamma"]
-            pval_left_peak=pval_left_peak.values[0]
-            pval_adj_left_peak=left_peak_infos["pval_gamma_adj"]
-            pval_adj_left_peak =pval_adj_left_peak.values[0]
-        if isinstance(P_right,np.int64):
-            # get pvalue and adjusted pvalue for this - peak
-            right_peak_infos=phage_minus_norm.loc[phage_minus_norm['Position']==P_right]
-            pval_right_peak=right_peak_infos["pval_gamma"]
-            pval_right_peak=pval_right_peak.values[0]
-            pval_adj_right_peak=right_peak_infos["pval_gamma_adj"]
-            pval_adj_right_peak=pval_adj_right_peak.values[0]
-        return phagename + "\t" + P_class + "\t" + str(P_left) + "\t" +str(pval_left_peak)+ "\t" +str(pval_adj_left_peak)+\
-               "\t" + str(P_right) + "\t" + str(pval_right_peak) + "\t" + str(pval_adj_right_peak)+ "\t" + P_type +\
-               "\t" + P_orient + "\t" + str(ave_whole_cov) + "\t" + P_comments + "\n"
-    return
-
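-# Usage sketch (hypothetical values): with multi=0 the call writes
-# "<phagename>_workflow.txt" and returns None; with multi=1 it instead returns
-# one tab-separated result line (including the peak p-values), e.g.:
-#   WorkflowReport("MyPhage", "COS (5')", 1, 35000, "COS", "Forward", 120)
-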
-def EstimateTime(secondes):
-    """ Convert a duration in seconds into a human-readable string."""
-    conv = (86400, 3600, 60, 1)
-    result = [0, 0, 0, 0]
-    i = 0
-    while secondes > 0:
-        result[i] = secondes // conv[i]  # integer division: whole days/hours/minutes/seconds
-        secondes = secondes - result[i] * conv[i]
-        i += 1
-    return str(result[0]) + " Days " + str(result[1]) + " Hrs " + str(result[2]) + " Min " + str(result[3]) + " Sec"
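-# Example (hypothetical value): EstimateTime(90061) returns
-# "1 Days 1 Hrs 1 Min 1 Sec", since 90061 = 86400 + 3600 + 60 + 1.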
-
-
-
-
-
-
diff --git a/_modules/main_utils.py b/_modules/main_utils.py
deleted file mode 100755
index f6c35d09301018c5a4b57323bd9ad030554649b2..0000000000000000000000000000000000000000
--- a/_modules/main_utils.py
+++ /dev/null
@@ -1,493 +0,0 @@
-##@file main_utils.py
-#
-# Contains utility functions and classes for the main program.
-# The aim is to make main simpler and smaller, and thus improve testability (by allowing separate/independent testing of small program "subparts").
-
-# Note about the main program's options. This is to be discussed and subject to change.
-# -g + --mapping_res_dir : assume we are on a cluster; perform mapping only and save results to files.
-# --mapping_res_dir + --cov_res_dir : assume we are on a cluster; process mapping results stored in files and put the readsCoverage results in other files.
-# A job array is used in that case: each PhageTerm process handles 1 chunk for 1 sequence.
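-# Hypothetical example of the three-step multi-machine (--mm) workflow laid out
-# in the usage string below (paths, file names and the job-array variable are
-# placeholders, not part of the original code):
-#   step 1: python PhageTerm.py --mm --dir_cov_mm /scratch/cov -f reads.fastq -r phage.fasta -c 16 --core_id $SLURM_ARRAY_TASK_ID
-#   step 2: python PhageTerm.py --mm --dir_cov_mm /scratch/cov --dir_seq_mm /scratch/seq --DR_path /scratch/DR --seq_id 0 --nb_pieces 16 -f reads.fastq -r phage.fasta
-#   step 3: python PhageTerm.py --mm --DR_path /scratch/DR --dir_seq_mm /scratch/seq -f reads.fastq -r phage.fasta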
-from __future__ import print_function
-
-from time import gmtime, strftime
-import sys
-import gzip
-from optparse import OptionParser, OptionGroup
-from _modules.utilities import checkReportTitle,changeCase
-from _modules.IData_handling import totReads,genomeFastaRecovery
-
-usage = """\n\nUsage: %prog -f reads.fastq -r phage_sequence.fasta [--report_title analysis_name -p reads_paired -s seed_lenght -d surrounding -t installation_test -c nbr_core -g host.fasta (warning increase process time) -l limit_multi-fasta -v virome_time]
-[--mm --dir_cov_mm path_to_coverage_results -c nb_cores --core_id idx_core -p reads_paired -s seed_lenght -d surrounding -l limit_multi-fasta]
-[--mm --dir_cov_mm path_to_coverage_results --dir_seq_mm path_to_sequence_results --DR_path path_to_results --seq_id index_of_sequence --nb_pieces nbr_of_read_chunks -p reads_paired -s seed_lenght -d surrounding -l limit_multi-fasta]
-[--mm --DR_path path_to_results --dir_seq_mm path_to_sequence_results -p reads_paired -s seed_lenght -d surrounding -l limit_multi-fasta]
-
-    Program: PhageTerm - Analyze phage termini and packaging mode using reads from high-throughput sequenced phage data
-    Version: 4.1 (also py3_release_1)
-    Contact: Julian Garneau <julian.garneau@usherbrooke.ca>
-    Contact: David Bikard <david.bikard@pasteur.fr>
-    Contact: Marc Monot <marc.monot@pasteur.fr>
-    Contact: Veronique Legrand <vlegrand@pasteur.fr>
-
-    You can perform a program test run upon installation using the "-t " option.
-    Arguments for the -t option can be: C5, C3, DS, DL, M, H or V
-
-    Example of test commands :
-    PhageTerm.py -t C5       -> Test run for a 5\' cohesive end (e.g. Lambda)
-    PhageTerm.py -t C3       -> Test run for a 3\' cohesive end (e.g. HK97)
-    PhageTerm.py -t DS     -> Test run for a short Direct Terminal Repeats end (e.g. T7)
-    PhageTerm.py -t DL     -> Test run for a long Direct Terminal Repeats end (e.g. T5)
-    PhageTerm.py -t H       -> Test run for a Headful packaging (e.g. P1)
-    PhageTerm.py -t M       -> Test run for a Mu-like packaging (e.g. Mu)
-    PhageTerm.py -t V       -> Test run for a Virome data
-    """
-
-
-## checkFastaFile
-#
-#  Checking input Fasta file (file existence and format).
-def checkFastaFile(filin):
-    """Check sequence Fasta file given by user"""
-    first_line = 1
-    try:
-        infil = gzip.open(filin, "rt") if filin.endswith(".gz") else open(filin, 'r')
-        for line in infil:
-            # Test '>'
-            if first_line :
-                if line[0] != '>':
-                    return 1
-                else:
-                    first_line = 0
-                    continue
-            # Test 1st base per line : 'ATGCN>'
-            base = changeCase(line[0])
-            if base not in ('A', 'T', 'C', 'G', 'N', '\n', '\r', '>'):
-                infil.close()
-                return 1
-        infil.close()
-        return 0
-    except IOError:
-        sys.exit('ERROR: No such file %s' % filin)
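-# Usage sketch (hypothetical file name): returns 0 for a well-formed FASTA and
-# 1 otherwise; gzipped input is detected by the ".gz" extension.
-#   if checkFastaFile("phage.fasta.gz"):
-#       sys.exit("ERROR in reference file")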
-
-## setOptions
-#
-# Uses the OptionParser class. Defines all the options offered by phageterm and their default values if any.
-# Also defines the usage message.
-# Returns an optionParser object usable by the main program.
-def setOptions():
-    getopt = OptionParser(usage=usage)
-
-    optreads = OptionGroup(getopt, 'Raw reads file in fastq format')
-    optreads.add_option('-f', '--fastq', dest='fastq', metavar='FILE', help='Fastq reads from Illumina TruSeq')
-    getopt.add_option_group(optreads)
-
-    optref = OptionGroup(getopt, 'Phage genome in fasta format')
-    optref.add_option('-r', '--ref', dest='reference', metavar='FILE',
-                      help='Reference phage genome as contigs in fasta format')
-    getopt.add_option_group(optref)
-
-    optname = OptionGroup(getopt, 'Name of the phage being analyzed by the user')
-    optname.add_option('--report_title', dest='analysis_name', metavar='STRING',
-                       help='Manually enter the name of the analysis. Used as prefix for output file names. Default value is \"analysis_date_HHMM\".')
-    getopt.add_option_group(optname)
-
-    optseed = OptionGroup(getopt, 'Length of the seed used for reads in the mapping process')
-    optseed.add_option('-s', '--seed', dest='seed', metavar='INT', type="int",
-                       help='Manually enter the length of the seed used for reads in the mapping process.')
-    getopt.add_option_group(optseed)
-
-    optsurround = OptionGroup(getopt, 'Length of the surrounding region considered for peak value accumulation')
-    optsurround.add_option('-d', '--surrounding', dest='surround', type="int", metavar='INT',
-                           help='Manually enter the length of the surrounding region used to merge very close peaks in the analysis process.')
-    getopt.add_option_group(optsurround)
-
-    optcore = OptionGroup(getopt,
-                          'GPU and multicore options. Default is 1 core and no GPU.')
-    optcore.add_option('-c', '--core', dest='core', metavar='INT', type="int",
-                       help='Manually enter the number of core you want to use.')
-    getopt.add_option_group(optcore)
-    #optcore.add_option('-u', '--gpu', dest='gpu', action="store_true", default=False, # VL: Keep that for later use maybe.
-    #                   help='use this flag if you want to use GPU for read mapping')
-    #optcore.add_option("--dir_mapping_res",dest='gpu_mapping_res_dir',metavar='STRING',default=None, help="directory where to put mapping results produced by GPU")
-    # optcore.add_option("--idx_chunk",dest='idx_chunk',metavar='INT',default=None,help="index of the chunk for which we want to compute coverage")
-    # optcore.add_option("--nb_chunks", dest='nb_chunks',metavar='INT', type="int",default=None,help="Indicate number of chunks wanted for GPU mapping. If None, phageTerm will automatically compute it")
-    optmm=OptionGroup(getopt,"options for multi machine (or cluster mode)")
-    optmm.add_option("--core_id",dest='core_id',metavar='INT',type="int",default=None,help="This option is used together with -c when running Pageterm on a cluster in parallel multimachine mode.")
-    optmm.add_option("--mm",dest='multi_machine_mode',action='store_true',default=False,help="use this option to indicate that you want to use the cluster (or multi machine) mode.")
-    optmm.add_option("--dir_cov_mm",dest='dir_cov_mm',metavar='STRING',default=None,help="directory where to put coverage results produced by Phageterm")
-    optmm.add_option("--dir_seq_mm", dest='dir_seq_mm', metavar='STRING', default=None,
-                       help="directory where to put per sequence results produced by Phageterm")
-    optmm.add_option("--nb_pieces",dest='nb_pieces',metavar='INT',default=None,help="For per sequence processing after reads coverage has been done on the cluster")
-    optmm.add_option("--DR_path",dest='DR_path',metavar='STRING',default=None,help="Directory where to put content of DR dictionnary (per sequence processing results)")
-    optmm.add_option("--seq_id",dest='seq_id',metavar='INT',default=None,help="index of the sequence for which we want to compute coverage")
-    getopt.add_option_group(optmm)
-
-    optchk=OptionGroup(getopt,"options related to checkpoints.")
-    optchk.add_option("--chk_freq",dest='chk_freq',metavar='INT',default=0,help="Frequency in minutes at which reads coverage (the longuest step in phageTerm) intermediate results must be saved ")
-    optchk.add_option("--dir_chk",dest='dir_chk',metavar='STRING',default="",help="Directory where to put checkpoint files")
-    getopt.add_option_group(optchk)
-
-    opthost = OptionGroup(getopt, 'Host genome in fasta format')
-    opthost.add_option('-g', '--host', dest='host', metavar='FILE',
-                       help='Reference host genome as unique contig in fasta format')
-    getopt.add_option_group(opthost)
-
-    optpaired = OptionGroup(getopt, 'Use paired-end reads')
-    optpaired.add_option('-p', '--paired', dest='paired', metavar='FILE',
-                         help='Use paired-end reads to calculate real insert coverage')
-    getopt.add_option_group(optpaired)
-
-    optmean = OptionGroup(getopt, 'Defined phage mean coverage')
-    optmean.add_option('-m', '--mean', dest='mean', metavar='INT', type="int", help='Defined phage mean coverage')
-    getopt.add_option_group(optmean)
-
-    optlimit = OptionGroup(getopt, 'Limit minimum fasta size (Default: 500)')
-    optlimit.add_option('-l', '--limit', dest='limit', metavar='INT', type="int", help='Limit minimum fasta length')
-    getopt.add_option_group(optlimit)
-
-    optvirome = OptionGroup(getopt, 'Estimate execution time for a Virome')
-    optvirome.add_option('-v', '--virome', dest='virome', metavar='INT', type="int",
-                         help='Estimate execution time for a Virome')
-    getopt.add_option_group(optvirome)
-
-    opttest = OptionGroup(getopt, 'Perform a program test run upon installation')
-    opttest.add_option('-t', '--test', dest='test', metavar='STRING',
-                       help='Perform a program test run upon installation. If you want to perform a test run, use the "-t " option. Arguments for the -t option can be: C5, C3, DS, DL, M, H or V. C5 -> Test run for a 5\' cohesive end (e.g. Lambda); C3 -> Test run for a 3\' cohesive end (e.g. HK97); DS -> Test run for a short Direct Terminal Repeats end (e.g. T7); DL -> Test run for a long Direct Terminal Repeats end (e.g. T5); H -> Test run for a Headful packaging (e.g. P1); M -> Test run for a Mu-like packaging (e.g. Mu); V -> Test run for Virome data')
-
-    opttest.add_option('--nrt',dest='nrt',action='store_true',default=False,help='dump phage Class name to a special file for non-regression testing')
-    getopt.add_option_group(opttest)
-
-    return getopt
-
-## User Raw data handling.
-#
-# This class encapsulates the raw data provided by the user as arguments to phageterm (input file names, testing mode if any, analysis_name, host and paired).
-# It also performs checks on the input files and computes the number of reads.
-class inputRawDataArgs:
-    def __init__(self,fastq,reference,host,analysis_name,paired,test,nrt):
-        if test == "C5":
-            print("\nPerforming a test run using test phage sequence with 5 prime cohesive overhang :")
-            print("\npython PhageTerm.py -f test-data/COS-5.fastq -r test-data/COS-5.fasta --report_title TEST_cohesive_5_prime")
-            fastq = "test-data/COS-5.fastq"
-            reference = "test-data/COS-5.fasta"
-            analysis_name = "Test-cohesive-5'"
-        elif test == "C3":
-            print("\nPerforming a test run using test phage sequence with 3 prime cohesive overhang:")
-            print("\npython PhageTerm.py -f test-data/COS-3.fastq -r test-data/COS-3.fasta --report_title TEST_cohesive_3_prime")
-            fastq = "test-data/COS-3.fastq"
-            reference = "test-data/COS-3.fasta"
-            analysis_name = "Test-cohesive-3'"
-        elif test == "DS":
-            print("\nPerforming a test run using test phage sequence with short direct terminal repeats (DTR-short) :")
-            print("\npython PhageTerm.py -f test-data/DTR-short.fastq -r test-data/DTR-short.fasta --report_title TEST_short_direct_terminal_repeats")
-            fastq = "test-data/DTR-short.fastq"
-            reference = "test-data/DTR-short.fasta"
-            analysis_name = "Test-short-direct-terminal-repeats"
-        elif test == "DL":
-            print("\nPerforming a test run using test phage sequence with long direct terminal repeats (DTR-long) :")
-            print("\npython PhageTerm.py -f test-data/DTR-long.fastq -r test-data/DTR-long.fasta --report_title TEST_long_direct_terminal_repeats")
-            fastq = "test-data/DTR-long.fastq"
-            reference = "test-data/DTR-long.fasta"
-            analysis_name = "Test-long-direct-terminal-repeats"
-        elif test == "H":
-            print("\nPerforming a test run using test phage sequence with headful packaging")
-            print("\npython PhageTerm.py -f test-data/Headful.fastq -r test-data/Headful.fasta --report_title TEST_headful")
-            fastq = "test-data/Headful.fastq"
-            reference = "test-data/Headful.fasta"
-            analysis_name = "Test-Headful"
-        elif test == "M":
-            print("\nPerforming a test run using test phage sequence with Mu-like packaging")
-            print("\npython PhageTerm.py -f test-data/Mu-like_R1.fastq -p test-data/Mu-like_R2.fastq -r test-data/Mu-like.fasta --report_title TEST_Mu-like -g test-data/Mu-like_host.fasta")
-            fastq = "test-data/Mu-like_R1.fastq"
-            paired = "test-data/Mu-like_R2.fastq"
-            reference = "test-data/Mu-like.fasta"
-            host = "test-data/Mu-like_host.fasta"
-            analysis_name = "Test-Mu-like"
-        elif test == "V":
-            print("\nPerforming a test run using virome data containing one example of each packaging mode")
-            print("\npython PhageTerm.py -f test-data/Virome.fastq -r test-data/Virome.fasta --report_title TEST_Virome")
-            fastq = "test-data/Virome.fastq"
-            reference = "test-data/Virome.fasta"
-            analysis_name = "Test-Virome"
-        elif test==None:
-            pass # Not a test, normal use.
-        else:
-            print("Unrecognized test run argument ('{}')!\nAllowed options are {}.".format(test, "C5, C3, DS, DL, H or M"))
-
-        if host == None:
-            host = ""
-        if paired == None:
-            paired = ""
-        # CHECK inputs
-        if analysis_name!=None:
-            analysis_name = checkReportTitle(analysis_name)
-            self.analysis_name = analysis_name
-        else:
-            self.analysis_name="NA"
-        if checkFastaFile(reference):
-            exit("ERROR in reference file")
-        self.reference = reference
-        if host != "":
-            if checkFastaFile(host):
-                exit("ERROR in reference file")
-            self.host = host
-        self.fastq=fastq
-        self.paired=paired
-        self.host=host
-        self.nrt=nrt
-        if (self.nrt==True):
-            print("running nrt tests")
-
-        # READS Number
-        self.tot_reads = totReads(fastq)
-        if paired != "":
-            self.tot_reads_paired = totReads(paired)
-            if (self.tot_reads != self.tot_reads_paired):
-                print("\nWARNING: Number of reads between the two reads files differ, using single reads only\n")
-                self.paired = ""
-
-
-## User functional parameters handling
-#
-# Here we gather user input parameters and global variables that define how the data will be processed from a functional point of view (e.g. seed length...).
-class functionalParms:
-    def __init__(self,seed,surround,mean,limit,virome,test):
-        if seed == None:
-            seed = 20
-        if seed < 15:
-            seed = 15
-        if surround == None:
-            surround = 20
-        self.seed=seed
-        self.surrounding=surround
-
-        if limit == None:
-            limit = 500
-        self.limit_reference=limit
-
-        if virome == None:
-            virome = 0
-        if virome != 1:
-            virome = 0
-        self.virome=virome
-
-        if mean == None:
-            mean = 250
-        self.mean=mean
-        if test == None:
-            self.test_run = 0
-        else:
-            self.test_run = 1
-        self.test=test
-        if test=="H" or test=="M" or test=="V":
-            self.surrounding = 0
-        # VARIABLE
-        self.edge = 500
-        self.insert_max = 1000
-        self.limit_fixed = 35
-        self.limit_preferred = 11
-        self.Mu_threshold = 0.5
-        self.draw = 0
-        self.workflow = 0
-        if test=="V":
-            self.workflow = 1  # set after the default so the virome test run is not clobbered
-
-## Derive other parameters from the functional and raw parameters.
-#
-# Here we gather data derived from the rawInputData and updated according to the functional parameters.
-# The functional parameter workflow can also be updated.
-class InputDerivedDataArgs:
-    def __init__(self,inputRaw,fparms):
-        # REFERENCE sequence recovery and edge adds
-        self.refseq_liste, self.refseq_name, refseq_rejected = genomeFastaRecovery(inputRaw.reference, fparms.limit_reference, fparms.edge)
-        #print strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
-        self.nbr_virome = len(self.refseq_liste)
-        if self.nbr_virome == 0:
-            print("\nERROR: All the reference(s) sequence(s) are under the length limitation : " + str(
-                fparms.limit_reference) + " (adapt your -l option)")
-            exit()
-        if self.nbr_virome > 1:
-            fparms.workflow = 1
-        length_virome = len("".join(self.refseq_liste))
-        self.mean_virome = length_virome // self.nbr_virome
-        if fparms.virome:
-            self.refseq_liste, self.refseq_name, refseq_rejected = ["N" * int(self.mean_virome)], ["Test_virome"], 0
-        if len(self.refseq_liste) == 1 and inputRaw.host != "":
-            self.hostseq = genomeFastaRecovery(inputRaw.host, fparms.limit_reference, fparms.edge, 1)
-            if len(self.hostseq[0]) != 0 and len(self.hostseq[0]) < len(self.refseq_liste[0]):
-                print("\nHost length < Phage length : removing host sequence.")
-                self.hostseq = ""
-        else:
-            self.hostseq = ""
-            if len(self.refseq_liste) > 1:
-                print("\nWARNING: Host analysis impossible with multiple fasta input\n")
-
-## Handling of technical parameters given by the user
-#
-# Here we gather user input parameters and former global variables that define how the data will be processed from a technical point of view (e.g. multicore, gpu...).
-# VL: parameters related to GPU processing are kept here in case GPU code is needed again one day, but they are currently unused.
-class technicalParms:
-    def __init__(self, core, gpu, mean, gpu_mapping_res_dir, nb_chunks, dir_cov_mm, seq_id, idx_chunk, \
-                 core_id, dir_seq_mm, multi_machine_mode, DR_path, nb_pieces,chk_freq=0,dir_chk="",test_mode=False):
-        self.chk_freq=chk_freq
-        self.dir_chk=dir_chk
-        self.multi_machine=multi_machine_mode
-        self.core = core
-        self.wanted_chunks = nb_chunks
-        self.dir_cov_mm = dir_cov_mm
-        self.DR_path=DR_path
-        self.test_mode=test_mode # used for testing the checkpoint implementation.
-        if nb_pieces!=None:
-            self.nb_pieces=int(nb_pieces)
-        else:
-            self.nb_pieces =None
-        if idx_chunk!=None:
-            self.idx_chunk=int(idx_chunk)
-        else:
-            self.idx_chunk =None
-        if seq_id!=None:
-            self.seq_id=int(seq_id)
-        else:
-            self.seq_id=None
-        self.core_id=core_id
-        self.dir_seq_mm=dir_seq_mm
-        if core == None:
-            self.core = 1
-        self.limit_coverage = max(50, mean * 2) / float(self.core)
-        if gpu ==True and self.core > 1:
-            print("Choose either multicore or gpu!")
-            exit(1)
-        self.gpu=gpu
-        if gpu == None:
-            self.gpu = False
-        self.gpu_mapping_res_dir=gpu_mapping_res_dir
-        if self.gpu==True and (self.dir_cov_mm != None or self.dir_seq_mm != None):
-            print("when -g is used it is either to perform mapping only or whole process, --dir-cov_res/--dir_seq_res and -g are thus mutually exclusive")
-            exit(1)
-        if (self.gpu==True and self.core_id!=None):
-            print("Inconsistency in options. -u/--gpu cannot be used with --core_id")
-            exit(1)
-        if self.chk_freq!=0 and self.dir_chk=="":
-            print("Inconsistency in options: if frequency for checkpoints is not NULL (you activated checkpoints), you must also indicate in which directory to put them.")
-            exit(1)
-        if self.chk_freq==0 and self.dir_chk!="":
-            print("Inconsistency in options: checkpoints are deactivated (frequency is 0) but you indicated directory for them!")
-            exit(1)
-        if self.multi_machine==True:
-            if (self.dir_cov_mm==None and self.dir_seq_mm==None and self.DR_path==None):
-                print("Please proivide path where to put results in multi machine mode")
-                exit(1)
-            elif  self.dir_cov_mm!=None and self.dir_seq_mm==None: # step 1: mapping+readsCoverage.
-                self.checkOptConsistencyS1()
-            elif  self.dir_cov_mm!=None and self.dir_seq_mm!=None: # step 2: per-sequence processing
-                self.checkOptConsistencyS2()
-            elif self.dir_cov_mm==None and self.dir_seq_mm!=None: # step 3: final report generation
-                self.checkOptConsistencyS3()
-            else:
-                print("inconsistencies in options; please read documentation")
-                print(usage)
-                exit(1)
-        else:
-            if (self.dir_cov_mm!=None or self.dir_seq_mm!=None or self.DR_path!=None):
-                print("Inconsistency in options: please use --mm if you intend to use multi machine mode")
-                exit(1)
-            if (self.chk_freq!=0 or self.dir_chk)!="":
-                print("checkpoints can only be used in multi-machine mode")
-                exit(0)
-        ## GPU stuff, in case we need it one day
-        # if (self.core>1 and self.core_id==None):
-        #     if (self.gpu_mapping_res_dir!=None or self.dir_seq_res!=None or self.dir_cov_res!=None):
-        #         print "Indicate core_id when processing mapping or coverage resuts on a cluster"
-        #         exit(1)
-        # if (self.core>1 and self.core_id!=None):
-        #     if not((self.gpu_mapping_res_dir!=None and self.dir_cov_res!=None) or (self.dir_cov_res!=None and self.dir_seq_res==None)):
-        #         print " Indicate both directory where to find intermediate results to process and directory where to put the results of this processing"
-        #         exit(1)
-        # if self.dir_cov_res!=None and (self.idx_seq!=None or self.idx_chunk!=None) and self.dir_seq_res==None and self.dir_mapping_res!=None:
-        #     print "Please provide both index of sequence and chunk index. In case you have hostseq, it has index 0 by convention so --idx_seq must be >=1."
-        #     exit(1)
-        # if self.core<=1 and self.dir_cov_res!=None:
-        #     print "Putting coverage results in files is usually used with multi-machine (cluster) mode"
-        #     exit(1)
-
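-    # Worked example (hypothetical values) for the limit_coverage formula in
-    # __init__ above: with mean=250 and core=4, limit_coverage = max(50, 500) / 4.0 = 125.0.
-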
-    def checkOptConsistencyS1(self):
-        if self.core_id == None:
-            print("Please indicate core_id when running mapping/coverage in multi machine mode")
-            exit(1)
-        if (self.core_id >= self.core):
-            print("--core_id must be >=0 and <nb_cores")
-            exit(1)
-        if self.core == 1:
-            print("Warning : running on only 1 core!")
-        if self.DR_path != None:
-            print("--DR_path is used at step 2 and step 3. It is incompatible with --dir_cov_res (step 1)")
-            exit(1)
-        if self.seq_id != None:
-            print("--seq_id is only used at step 2. It is incompatible with --dir_cov_res (step 1)")
-            exit(1)
-        if self.nb_pieces != None:
-            print("--nb_pieces is only used at step 2. It is incompatible with --dir_cov_res (step 1)")
-            exit(1)
-
-    def checkOptConsistencyS2(self):
-        if self.DR_path == None:
-            print("Please indicate DR_path when running per sequence processing in multi machine mode")
-            exit(1)
-        if self.seq_id == None:
-            print("Please indicate index of sequence to process in multi machine mode.")
-            exit(1)
-        if self.nb_pieces == None:
-            print(" Please indicate in how many number of packets the reads were mapped during step 1.")
-            exit(1)
-        if self.core_id != None:
-            print("There is no need to specify --core_id when doing step 2 in multi-machine mode (per sequence processing of the results of step 1)")
-            exit(1)
-        if self.core != 1:
-            print("There is no need to specify --core when doing step 2 in multi-machine mode (per sequence processing of the results of step 1)")
-            exit(1)
-
-    def checkOptConsistencyS3(self):
-        if self.DR_path == None:
-            print("Please indicate DR_path for generating final report.")
-            exit(1)
-        if self.seq_id != None:
-            print("--seq_id is incompatible with step 3 (report generation)")
-            exit(1)
-        if self.nb_pieces != None:
-            print("--nb_pieces is incompatible with step 3 (report generation)")
-            exit(1)
-        if self.core_id != None:
-            print("--core_id is incompatible with step 3 (report generation)")
-            exit(1)
-        if self.core != 1:
-            print("--core is incompatible with step 3 (report generation)")
-            exit(1)
-
-
-## Checks option and argument consistency and instantiates the data structures for main.
-#
-# Consistency checks and instantiation of the technicalParms, inputDerivedDataArgs, functionalParms and inputRawDataArgs objects that are directly usable inside main.
-def checkOptArgsConsistency(getopt):
-    """
-
-    :rtype:
-    """
-    options, arguments = getopt.parse_args()
-    if options.fastq == None and options.test == None:
-        getopt.error('\tNo reads file provided.\n\t\t\tUse -h or --help for more details\n')
-
-    if options.reference == None and options.test == None:
-        getopt.error('\tNo fasta reference file provided.\n\t\t\tUse -h or --help for more details\n')
-
-    if options.analysis_name == None and options.test == None:
-        options.analysis_name = "Analysis"  # default title; the bare local assignment had no effect
-
-    inRawDArgs = inputRawDataArgs(options.fastq, options.reference, options.host, options.analysis_name, options.paired,
-                                  options.test,options.nrt)
-    fParms = functionalParms(options.seed, options.surround, options.mean, options.limit, options.virome, options.test)
-    tParms = technicalParms(options.core, None, fParms.mean, None, None,
-                            options.dir_cov_mm, options.seq_id, None, options.core_id,
-                            options.dir_seq_mm, options.multi_machine_mode,
-                            options.DR_path,options.nb_pieces,
-                            float(options.chk_freq), options.dir_chk, False)
-    inDArgs = InputDerivedDataArgs(inRawDArgs, fParms)
-    return inRawDArgs, fParms, tParms, inDArgs  # TODO: make a version that returns only 1 structure gathering only the useful information.
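-# Usage sketch for this module (hedged; the command line is hypothetical):
-#   getopt = setOptions()
-#   inRawDArgs, fParms, tParms, inDArgs = checkOptArgsConsistency(getopt)
-# e.g. for a plain single-machine run:
-#   python PhageTerm.py -f reads.fastq -r phage.fasta --report_title my_analysis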
diff --git a/doxy_doc_phageterm.conf b/doxy_doc_phageterm.conf
deleted file mode 100644
index c1986ca3369e174b92a870ce83ea2e807a57f16e..0000000000000000000000000000000000000000
--- a/doxy_doc_phageterm.conf
+++ /dev/null
@@ -1,2448 +0,0 @@
-# Doxyfile 1.8.15
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a double hash (##) is considered a comment and is placed in
-# front of the TAG it is preceding.
-#
-# All text after a single hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists, items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (\" \").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the configuration
-# file that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
-# The default value is: UTF-8.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
-# double-quotes, unless you are using Doxywizard) that should identify the
-# project for which the documentation is generated. This name is used in the
-# title of most generated pages and in a few other places.
-# The default value is: My Project.
-
-PROJECT_NAME           = "Phageterm"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
-# could be handy for archiving the generated documentation or if some version
-# control system is used.
-
-PROJECT_NUMBER         = 1.1.1.OCL.1
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer a
-# quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          = "Examines reads against a reference sequence and, by harnessing a sequencing bias at phage termini, determines whether the reference sequence is a phage or not."
-
-# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
-# in the documentation. The maximum height of the logo should not exceed 55
-# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
-# the logo to the output directory.
-
-PROJECT_LOGO           =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
-# into which the generated documentation will be written. If a relative path is
-# entered, it will be relative to the location where doxygen was started. If
-# left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       = doc
-
-# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
-# directories (in 2 levels) under the output directory of each output format and
-# will distribute the generated files over these directories. Enabling this
-# option can be useful when feeding doxygen a huge amount of source files, where
-# putting all generated files in the same directory would otherwise causes
-# performance problems for the file system.
-# The default value is: NO.
-
-CREATE_SUBDIRS         = NO
-
-# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
-# characters to appear in the names of generated files. If set to NO, non-ASCII
-# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
-# U+3044.
-# The default value is: NO.
-
-ALLOW_UNICODE_NAMES    = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
-# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
-# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
-# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
-# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
-# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
-# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
-# Ukrainian and Vietnamese.
-# The default value is: English.
-
-OUTPUT_LANGUAGE        = English
-
-# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all generated output in the proper direction.
-# Possible values are: None, LTR, RTL and Context.
-# The default value is: None.
-
-OUTPUT_TEXT_DIRECTION  = None
-
-# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
-# descriptions after the members that are listed in the file and class
-# documentation (similar to Javadoc). Set to NO to disable this.
-# The default value is: YES.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
-# description of a member or function before the detailed description
-#
-# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-# The default value is: YES.
-
-REPEAT_BRIEF           = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator that is
-# used to form the text in various listings. Each string in this list, if found
-# as the leading text of the brief description, will be stripped from the text
-# and the result, after processing the whole list, is used as the annotated
-# text. Otherwise, the brief description is used as-is. If left blank, the
-# following values are used ($name is automatically replaced with the name of
-# the entity):The $name class, The $name widget, The $name file, is, provides,
-# specifies, contains, represents, a, an and the.
-
-ABBREVIATE_BRIEF       = "The $name class" \
-                         "The $name widget" \
-                         "The $name file" \
-                         is \
-                         provides \
-                         specifies \
-                         contains \
-                         represents \
-                         a \
-                         an \
-                         the
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# doxygen will generate a detailed section even if there is only a brief
-# description.
-# The default value is: NO.
-
-ALWAYS_DETAILED_SEC    = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-# The default value is: NO.
-
-INLINE_INHERITED_MEMB  = NO
-
-# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
-# before files name in the file list and in the header files. If set to NO the
-# shortest path that makes the file name unique will be used
-# The default value is: YES.
-
-FULL_PATH_NAMES        = YES
-
-# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
-# Stripping is only done if one of the specified strings matches the left-hand
-# part of the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the path to
-# strip.
-#
-# Note that you can specify absolute paths here, but also relative paths, which
-# will be relative from the directory where doxygen is started.
-# This tag requires that the tag FULL_PATH_NAMES is set to YES.
-
-STRIP_FROM_PATH        =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
-# path mentioned in the documentation of a class, which tells the reader which
-# header file to include in order to use a class. If left blank only the name of
-# the header file containing the class definition is used. Otherwise one should
-# specify the list of include paths that are normally passed to the compiler
-# using the -I flag.
-
-STRIP_FROM_INC_PATH    =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
-# less readable) file names. This can be useful is your file systems doesn't
-# support long names like on DOS, Mac, or CD-ROM.
-# The default value is: NO.
-
-SHORT_NAMES            = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
-# first line (until the first dot) of a Javadoc-style comment as the brief
-# description. If set to NO, the Javadoc-style will behave just like regular Qt-
-# style comments (thus requiring an explicit @brief command for a brief
-# description.)
-# The default value is: NO.
-
-JAVADOC_AUTOBRIEF      = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
-# line (until the first dot) of a Qt-style comment as the brief description. If
-# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
-# requiring an explicit \brief command for a brief description.)
-# The default value is: NO.
-
-QT_AUTOBRIEF           = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
-# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
-# a brief description. This used to be the default behavior. The new default is
-# to treat a multi-line C++ comment block as a detailed description. Set this
-# tag to YES if you prefer the old behavior instead.
-#
-# Note that setting this tag to YES also means that rational rose comments are
-# not recognized any more.
-# The default value is: NO.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
-# documentation from any documented member that it re-implements.
-# The default value is: YES.
-
-INHERIT_DOCS           = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
-# page for each member. If set to NO, the documentation of a member will be part
-# of the file/class/namespace that contains it.
-# The default value is: NO.
-
-SEPARATE_MEMBER_PAGES  = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
-# uses this value to replace tabs by spaces in code fragments.
-# Minimum value: 1, maximum value: 16, default value: 4.
-
-TAB_SIZE               = 4
-
-# This tag can be used to specify a number of aliases that act as commands in
-# the documentation. An alias has the form:
-# name=value
-# For example adding
-# "sideeffect=@par Side Effects:\n"
-# will allow you to put the command \sideeffect (or @sideeffect) in the
-# documentation, which will result in a user-defined paragraph with heading
-# "Side Effects:". You can put \n's in the value part of an alias to insert
-# newlines (in the resulting output). You can put ^^ in the value part of an
-# alias to insert a newline as if a physical newline was in the original file.
-
-ALIASES                =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST              =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
-# only. Doxygen will then generate output that is more tailored for C. For
-# instance, some of the names that are used will be different. The list of all
-# members will be omitted, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_FOR_C  = NO
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
-# Python sources only. Doxygen will then generate output that is more tailored
-# for that language. For instance, namespaces will be presented as packages,
-# qualified scopes will look different, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_JAVA   = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources. Doxygen will then generate output that is tailored for Fortran.
-# The default value is: NO.
-
-OPTIMIZE_FOR_FORTRAN   = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for VHDL.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_VHDL   = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given
-# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension, and
-# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
-# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
-# Fortran. In the latter case the parser tries to guess whether the code is fixed
-# or free formatted code, this is the default for Fortran type files), VHDL. For
-# instance to make doxygen treat .inc files as Fortran files (default is PHP),
-# and .f files as C (default is Fortran), use: inc=Fortran f=C.
-#
-# Note: For files without extension you can use no_extension as a placeholder.
-#
-# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
-# the files are not read by doxygen.
-
-EXTENSION_MAPPING      =
-
-# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
-# according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you can
-# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
-# case of backward compatibilities issues.
-# The default value is: YES.
-
-MARKDOWN_SUPPORT       = YES
-
-# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
-# to that level are automatically included in the table of contents, even if
-# they do not have an id attribute.
-# Note: This feature currently applies only to Markdown headings.
-# Minimum value: 0, maximum value: 99, default value: 0.
-# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
-
-TOC_INCLUDE_HEADINGS   = 0
-
-# When enabled doxygen tries to link words that correspond to documented
-# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by putting a % sign in front of the word or
-# globally by setting AUTOLINK_SUPPORT to NO.
-# The default value is: YES.
-
-AUTOLINK_SUPPORT       = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should set this
-# tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string);
-# versus func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-# The default value is: NO.
-
-BUILTIN_STL_SUPPORT    = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-# The default value is: NO.
-
-CPP_CLI_SUPPORT        = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
-# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
-# will parse them like normal C++ but will assume all classes use public instead
-# of private inheritance when no explicit protection keyword is present.
-# The default value is: NO.
-
-SIP_SUPPORT            = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES will make
-# doxygen to replace the get and set methods by a property in the documentation.
-# This will only work if the methods are indeed getting or setting a simple
-# type. If this is not the case, or you want to show the methods anyway, you
-# should set this option to NO.
-# The default value is: YES.
-
-IDL_PROPERTY_SUPPORT   = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-# The default value is: NO.
-
-DISTRIBUTE_GROUP_DOC   = NO
-
-# If one adds a struct or class to a group and this option is enabled, then also
-# any nested class or struct is added to the same group. By default this option
-# is disabled and one has to add nested compounds explicitly via \ingroup.
-# The default value is: NO.
-
-GROUP_NESTED_COMPOUNDS = NO
-
-# Set the SUBGROUPING tag to YES to allow class member groups of the same type
-# (for instance a group of public functions) to be put as a subgroup of that
-# type (e.g. under the Public Functions section). Set it to NO to prevent
-# subgrouping. Alternatively, this can be done per class using the
-# \nosubgrouping command.
-# The default value is: YES.
-
-SUBGROUPING            = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
-# are shown inside the group in which they are included (e.g. using \ingroup)
-# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
-# and RTF).
-#
-# Note that this feature does not work in combination with
-# SEPARATE_MEMBER_PAGES.
-# The default value is: NO.
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
-# with only public data fields or simple typedef fields will be shown inline in
-# the documentation of the scope in which they are defined (i.e. file,
-# namespace, or group documentation), provided this scope is documented. If set
-# to NO, structs, classes, and unions are shown on a separate page (for HTML and
-# Man pages) or section (for LaTeX and RTF).
-# The default value is: NO.
-
-INLINE_SIMPLE_STRUCTS  = NO
-
-# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
-# enum is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically be
-# useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-# The default value is: NO.
-
-TYPEDEF_HIDES_STRUCT   = NO
-
-# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
-# cache is used to resolve symbols given their name and scope. Since this can be
-# an expensive process and often the same symbol appears multiple times in the
-# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
-# doxygen will become slower. If the cache is too large, memory is wasted. The
-# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
-# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
-# symbols. At the end of a run doxygen will report the cache usage and suggest
-# the optimal cache size from a speed point of view.
-# Minimum value: 0, maximum value: 9, default value: 0.
-
-LOOKUP_CACHE_SIZE      = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
-# documentation are documented, even if no documentation was available. Private
-# class members and static file members will be hidden unless the
-# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
-# Note: This will also disable the warnings about undocumented members that are
-# normally produced when WARNINGS is set to YES.
-# The default value is: NO.
-
-EXTRACT_ALL            = NO
-
-# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
-# be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PRIVATE        = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
-# scope will be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PACKAGE        = NO
-
-# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
-# included in the documentation.
-# The default value is: NO.
-
-EXTRACT_STATIC         = NO
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
-# locally in source files will be included in the documentation. If set to NO,
-# only classes defined in header files are included. Does not have any effect
-# for Java sources.
-# The default value is: YES.
-
-EXTRACT_LOCAL_CLASSES  = YES
-
-# This flag is only useful for Objective-C code. If set to YES, local methods,
-# which are defined in the implementation section but not in the interface are
-# included in the documentation. If set to NO, only methods in the interface are
-# included.
-# The default value is: NO.
-
-EXTRACT_LOCAL_METHODS  = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base name of
-# the file that contains the anonymous namespace. By default, anonymous
-# namespaces are hidden.
-# The default value is: NO.
-
-EXTRACT_ANON_NSPACES   = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
-# undocumented members inside documented classes or files. If set to NO these
-# members will be included in the various overviews, but no documentation
-# section is generated. This option has no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_MEMBERS     = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy. If set
-# to NO, these classes will be included in the various overviews. This option
-# has no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_CLASSES     = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO, these declarations will be
-# included in the documentation.
-# The default value is: NO.
-
-HIDE_FRIEND_COMPOUNDS  = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
-# documentation blocks found inside the body of a function. If set to NO, these
-# blocks will be appended to the function's detailed documentation block.
-# The default value is: NO.
-
-HIDE_IN_BODY_DOCS      = NO
-
-# The INTERNAL_DOCS tag determines if documentation that is typed after a
-# \internal command is included. If the tag is set to NO then the documentation
-# will be excluded. Set it to YES to include the internal documentation.
-# The default value is: NO.
-
-INTERNAL_DOCS          = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES, upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-# The default value is: system dependent.
-
-CASE_SENSE_NAMES       = NO
-
-# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
-# their full class and namespace scopes in the documentation. If set to YES, the
-# scope will be hidden.
-# The default value is: NO.
-
-HIDE_SCOPE_NAMES       = NO
-
-# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
-# append additional text to a page's title, such as Class Reference. If set to
-# YES the compound reference will be hidden.
-# The default value is: NO.
-
-HIDE_COMPOUND_REFERENCE= NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
-# the files that are included by a file in the documentation of that file.
-# The default value is: YES.
-
-SHOW_INCLUDE_FILES     = YES
-
-# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
-# grouped member an include statement to the documentation, telling the reader
-# which file to include in order to use the member.
-# The default value is: NO.
-
-SHOW_GROUPED_MEMB_INC  = NO
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
-# files with double quotes in the documentation rather than with angle brackets.
-# The default value is: NO.
-
-FORCE_LOCAL_INCLUDES   = NO
-
-# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
-# documentation for inline members.
-# The default value is: YES.
-
-INLINE_INFO            = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
-# (detailed) documentation of file and class members alphabetically by member
-# name. If set to NO, the members will appear in declaration order.
-# The default value is: YES.
-
-SORT_MEMBER_DOCS       = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
-# descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO, the members will appear in declaration order. Note that
-# this will also influence the order of the classes in the class list.
-# The default value is: NO.
-
-SORT_BRIEF_DOCS        = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
-# (brief and detailed) documentation of class members so that constructors and
-# destructors are listed first. If set to NO the constructors will appear in the
-# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
-# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
-# member documentation.
-# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
-# detailed member documentation.
-# The default value is: NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
-# of group names into alphabetical order. If set to NO the group names will
-# appear in their defined order.
-# The default value is: NO.
-
-SORT_GROUP_NAMES       = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
-# fully-qualified names, including namespaces. If set to NO, the class list will
-# be sorted only by class name, not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the alphabetical
-# list.
-# The default value is: NO.
-
-SORT_BY_SCOPE_NAME     = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
-# type resolution of all parameters of a function it will reject a match between
-# the prototype and the implementation of a member function even if there is
-# only one candidate or it is obvious which candidate to choose by doing a
-# simple string match. When STRICT_PROTO_MATCHING is disabled, doxygen will
-# still accept a match between prototype and implementation in such cases.
-# The default value is: NO.
-
-STRICT_PROTO_MATCHING  = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
-# list. This list is created by putting \todo commands in the documentation.
-# The default value is: YES.
-
-GENERATE_TODOLIST      = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
-# list. This list is created by putting \test commands in the documentation.
-# The default value is: YES.
-
-GENERATE_TESTLIST      = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
-# list. This list is created by putting \bug commands in the documentation.
-# The default value is: YES.
-
-GENERATE_BUGLIST       = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
-# the deprecated list. This list is created by putting \deprecated commands in
-# the documentation.
-# The default value is: YES.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional documentation
-# sections, marked by \if <section_label> ... \endif and \cond <section_label>
-# ... \endcond blocks.
-
-ENABLED_SECTIONS       =
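-
-# A minimal sketch of a conditional section (the label "internal_docs" is
-# hypothetical): wrap a block in a doc comment with
-# \if internal_docs ... \endif (or \cond internal_docs ... \endcond), then
-# enable it with:
-#
-# ENABLED_SECTIONS = internal_docs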
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
-# initial value of a variable or macro / define can have for it to appear in the
-# documentation. If the initializer consists of more lines than specified here
-# it will be hidden. Use a value of 0 to hide initializers completely. The
-# appearance of the value of individual variables and macros / defines can be
-# controlled using the \showinitializer or \hideinitializer commands in the
-# documentation regardless of this setting.
-# Minimum value: 0, maximum value: 10000, default value: 30.
-
-MAX_INITIALIZER_LINES  = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES, the
-# list will mention the files that were used to generate the documentation.
-# The default value is: YES.
-
-SHOW_USED_FILES        = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
-# will remove the Files entry from the Quick Index and from the Folder Tree View
-# (if specified).
-# The default value is: YES.
-
-SHOW_FILES             = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
-# page. This will remove the Namespaces entry from the Quick Index and from the
-# Folder Tree View (if specified).
-# The default value is: YES.
-
-SHOW_NAMESPACES        = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag and <input-file> is the name of an input file provided
-# by doxygen. Whatever the program writes to standard output is used as the file
-# version. For an example see the documentation.
-
-FILE_VERSION_FILTER    =
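-
-# A hedged example (assuming the sources live in a git checkout): the filter
-# below prints the abbreviated hash of the last commit touching the input file
-# that doxygen appends to the command line:
-#
-# FILE_VERSION_FILTER = "git log -n 1 --pretty=format:%h --"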
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option. You can
-# optionally specify a file name after the option; if omitted, DoxygenLayout.xml
-# will be used as the name of the layout file.
-#
-# Note that if you run doxygen from a directory containing a file called
-# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
-# tag is left empty.
-
-LAYOUT_FILE            =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
-# the reference definitions. This must be a list of .bib files. The .bib
-# extension is automatically appended if omitted. This requires the bibtex tool
-# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
-# For LaTeX the style of the bibliography can be controlled using
-# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
-# search path. See also \cite for info on how to create references.
-
-CITE_BIB_FILES         =
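-
-# An illustrative setup (the file name refs.bib and the key garneau2017 are
-# hypothetical): list the bibliography file here and reference an entry from a
-# doc comment with \cite garneau2017:
-#
-# CITE_BIB_FILES = refs.bib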
-
-#---------------------------------------------------------------------------
-# Configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated to
-# standard output by doxygen. If QUIET is set to YES this implies that the
-# messages are off.
-# The default value is: NO.
-
-QUIET                  = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
-# this implies that the warnings are on.
-#
-# Tip: Turn warnings on while writing the documentation.
-# The default value is: YES.
-
-WARNINGS               = YES
-
-# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
-# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
-# will automatically be disabled.
-# The default value is: YES.
-
-WARN_IF_UNDOCUMENTED   = YES
-
-# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some parameters
-# in a documented function, or documenting parameters that don't exist or using
-# markup commands wrongly.
-# The default value is: YES.
-
-WARN_IF_DOC_ERROR      = YES
-
-# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
-# are documented, but have no documentation for their parameters or return
-# value. If set to NO, doxygen will only warn about wrong or incomplete
-# parameter documentation, but not about the absence of documentation.
-# The default value is: NO.
-
-WARN_NO_PARAMDOC       = NO
-
-# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
-# a warning is encountered.
-# The default value is: NO.
-
-WARN_AS_ERROR          = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that doxygen
-# can produce. The string should contain the $file, $line, and $text tags, which
-# will be replaced by the file and line number from which the warning originated
-# and the warning text. Optionally the format may contain $version, which will
-# be replaced by the version of the file (if it could be obtained via
-# FILE_VERSION_FILTER).
-# The default value is: $file:$line: $text.
-
-WARN_FORMAT            = "$file:$line: $text"
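-
-# As an illustration, a format that some IDEs parse more readily (file name
-# followed by the line number in parentheses) could be:
-#
-# WARN_FORMAT = "$file($line): $text"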
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning and error
-# messages should be written. If left blank the output is written to standard
-# error (stderr).
-
-WARN_LOGFILE           =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag is used to specify the files and/or directories that contain
-# documented source files. You may enter file names like myfile.cpp or
-# directories like /usr/src/myproject. Separate the files or directories with
-# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
-# Note: If this tag is empty the current directory is searched.
-
-INPUT                  = .
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
-# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
-# documentation (see: https://www.gnu.org/software/libiconv/) for the list of
-# possible encodings.
-# The default value is: UTF-8.
-
-INPUT_ENCODING         = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories.
-#
-# Note that for custom extensions or not directly supported extensions you also
-# need to set EXTENSION_MAPPING for the extension otherwise the files are not
-# read by doxygen.
-#
-# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
-# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
-# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
-# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
-# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf.
-
-FILE_PATTERNS          = *.py
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories should
-# be searched for input files as well.
-# The default value is: NO.
-
-RECURSIVE              = YES
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-#
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE                =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-# The default value is: NO.
-
-EXCLUDE_SYMLINKS       = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories.
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories for example use the pattern */test/*
-
-EXCLUDE_PATTERNS       =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories use the pattern */test/*
-
-EXCLUDE_SYMBOLS        =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or directories
-# that contain example code fragments that are included (see the \include
-# command).
-
-EXAMPLE_PATH           =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank all
-# files are included.
-
-EXAMPLE_PATTERNS       = *
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude commands
-# irrespective of the value of the RECURSIVE tag.
-# The default value is: NO.
-
-EXAMPLE_RECURSIVE      = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or directories
-# that contain images that are to be included in the documentation (see the
-# \image command).
-
-IMAGE_PATH             =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command:
-#
-# <filter> <input-file>
-#
-# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
-# name of an input file. Doxygen will then use the output that the filter
-# program writes to standard output. If FILTER_PATTERNS is specified, this tag
-# will be ignored.
-#
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-#
-# Note that for custom extensions or not directly supported extensions you also
-# need to set EXTENSION_MAPPING for the extension otherwise the files are not
-# properly processed by doxygen.
-
-INPUT_FILTER           =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form: pattern=filter
-# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
-# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
-# patterns match the file name, INPUT_FILTER is applied.
-#
-# Note that for custom extensions or not directly supported extensions you also
-# need to set EXTENSION_MAPPING for the extension otherwise the files are not
-# properly processed by doxygen.
-
-FILTER_PATTERNS        =
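-
-# A hedged sketch for this Python project (the filter script name is
-# hypothetical; any executable that rewrites a source file to stdout works):
-#
-# FILTER_PATTERNS = *.py=py_filter.sh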
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will also be used to filter the input files that are used for
-# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
-# The default value is: NO.
-
-FILTER_SOURCE_FILES    = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERNS (if any) and
-# it is also possible to disable source filtering for a specific pattern using
-# *.ext= (so without naming a filter).
-# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on for instance GitHub
-# and want to reuse the introduction page also for the doxygen output.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
-# generated. Documented entities will be cross-referenced with these sources.
-#
-# Note: To get rid of all source code in the generated output, make sure that
-# also VERBATIM_HEADERS is set to NO.
-# The default value is: NO.
-
-SOURCE_BROWSER         = YES
-
-# Setting the INLINE_SOURCES tag to YES will include the body of functions,
-# classes and enums directly into the documentation.
-# The default value is: NO.
-
-INLINE_SOURCES         = YES
-
-# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
-# special comment blocks from generated source code fragments. Normal C, C++ and
-# Fortran comments will always remain visible.
-# The default value is: YES.
-
-STRIP_CODE_COMMENTS    = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
-# entity all documented functions referencing it will be listed.
-# The default value is: NO.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES then for each documented function
-# all documented entities called/used by that function will be listed.
-# The default value is: NO.
-
-REFERENCES_RELATION    = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES then the hyperlinks from functions in REFERENCES_RELATION and
-# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
-# link to the documentation.
-# The default value is: YES.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
-# source code will show a tooltip with additional information such as prototype,
-# brief description and links to the definition and documentation. Since this
-# will make the HTML file larger and loading of large files a bit slower, you
-# can opt to disable this feature.
-# The default value is: YES.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-SOURCE_TOOLTIPS        = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code will
-# point to the HTML generated by the htags(1) tool instead of doxygen built-in
-# source browser. The htags tool is part of GNU's global source tagging system
-# (see https://www.gnu.org/software/global/global.html). You will need version
-# 4.8.6 or higher.
-#
-# To use it do the following:
-# - Install the latest version of global
-# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
-# - Make sure the INPUT points to the root of the source tree
-# - Run doxygen as normal
-#
-# Doxygen will invoke htags (and that will in turn invoke gtags), so these
-# tools must be available from the command line (i.e. in the search path).
-#
-# The result: instead of the source browser generated by doxygen, the links to
-# source code will now point to the output of htags.
-# The default value is: NO.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-USE_HTAGS              = NO
-
-# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
-# verbatim copy of the header file for each class for which an include is
-# specified. Set to NO to disable this.
-# See also: Section \class.
-# The default value is: YES.
-
-VERBATIM_HEADERS       = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
-# compounds will be generated. Enable this if the project contains a lot of
-# classes, structs, unions or interfaces.
-# The default value is: YES.
-
-ALPHABETICAL_INDEX     = YES
-
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX    = 5
-
-# In case all classes in a project start with a common prefix, all classes will
-# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
-# can be used to specify a prefix (or a list of prefixes) that should be ignored
-# while generating the index headers.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-IGNORE_PREFIX          =
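-
-# Illustration (the prefix is hypothetical): if every class were named
-# PhtSomething, the following would index a class PhtCoverage under "C" rather
-# than under "P":
-#
-# IGNORE_PREFIX = Pht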
-
-#---------------------------------------------------------------------------
-# Configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output.
-# The default value is: YES.
-
-GENERATE_HTML          = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_OUTPUT            = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
-# generated HTML page (for example: .htm, .php, .asp).
-# The default value is: .html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FILE_EXTENSION    = .html
-
-# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
-# each generated HTML page. If the tag is left blank doxygen will generate a
-# standard header.
-#
-# To get valid HTML, the header file must include any scripts and style sheets
-# that doxygen needs, which depend on the configuration options used (e.g. the
-# setting GENERATE_TREEVIEW). It is highly recommended to start from a default
-# header generated using
-# doxygen -w html new_header.html new_footer.html new_stylesheet.css
-# YourConfigFile
-# and then modify the file new_header.html. See also section "Doxygen usage"
-# for information on how to generate the default header that doxygen normally
-# uses.
-# Note: The header is subject to change so you typically have to regenerate the
-# default header when upgrading to a newer version of doxygen. For a description
-# of the possible markers and block names see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_HEADER            =
-
-# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
-# generated HTML page. If the tag is left blank doxygen will generate a standard
-# footer. See HTML_HEADER for more information on how to generate a default
-# footer and what special commands can be used inside the footer. See also
-# section "Doxygen usage" for information on how to generate the default footer
-# that doxygen normally uses.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FOOTER            =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
-# sheet that is used by each HTML page. It can be used to fine-tune the look of
-# the HTML output. If left blank doxygen will generate a default style sheet.
-# See also section "Doxygen usage" for information on how to generate the style
-# sheet that doxygen normally uses.
-# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
-# it is more robust and this tag (HTML_STYLESHEET) will in the future become
-# obsolete.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_STYLESHEET        =
-
-# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
-# cascading style sheets that are included after the standard style sheets
-# created by doxygen. Using this option one can overrule certain style aspects.
-# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefore more robust against future updates.
-# Doxygen will copy the style sheet files to the output directory.
-# Note: The order of the extra style sheet files is of importance (e.g. the last
-# style sheet in the list overrules the setting of the previous ones in the
-# list). For an example see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_STYLESHEET  =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
-# files will be copied as-is; there are no commands or markers available.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_FILES       =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the style sheet and background images according to
-# this color. Hue is specified as an angle on a colorwheel, see
-# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
-# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
-# is purple, and 360 is red again.
-# Minimum value: 0, maximum value: 359, default value: 220.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_HUE    = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
-# in the HTML output. For a value of 0 the output will use grayscales only. A
-# value of 255 will produce the most vivid colors.
-# Minimum value: 0, maximum value: 255, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_SAT    = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
-# luminance component of the colors in the HTML output. Values below 100
-# gradually make the output lighter, whereas values above 100 make the output
-# darker. The value divided by 100 is the actual gamma applied, so 80 represents
-# a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
-# change the gamma.
-# Minimum value: 40, maximum value: 240, default value: 80.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_GAMMA  = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting this
-# to YES can help to show when doxygen was last run and thus if the
-# documentation is up to date.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_TIMESTAMP         = NO
-
-# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
-# documentation will contain a main index with vertical navigation menus that
-# are dynamically created via Javascript. If disabled, the navigation index will
-# consist of multiple levels of tabs that are statically embedded in every HTML
-# page. Disable this option to support browsers that do not have Javascript,
-# like the Qt help browser.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_DYNAMIC_MENUS     = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_DYNAMIC_SECTIONS  = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
-# shown in the various tree structured indices initially; the user can expand
-# and collapse entries dynamically later on. Doxygen will expand the tree to
-# such a level that at most the specified number of entries are visible (unless
-# a fully collapsed tree already exceeds this amount). So setting the number of
-# entries to 1 will produce a fully collapsed tree by default. 0 is a special
-# value representing an infinite number of entries and will result in a fully
-# expanded tree by default.
-# Minimum value: 0, maximum value: 9999, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files will be
-# generated that can be used as input for Apple's Xcode 3 integrated development
-# environment (see: https://developer.apple.com/tools/xcode/), introduced with
-# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
-# Makefile in the HTML output directory. Running make will produce the docset in
-# that directory and running make install will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
-# startup. See https://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_DOCSET        = NO
-
-# This tag determines the name of the docset feed. A documentation feed provides
-# an umbrella under which multiple documentation sets from a single provider
-# (such as a company or product suite) can be grouped.
-# The default value is: Doxygen generated docs.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_FEEDNAME        = "Doxygen generated docs"
-
-# This tag specifies a string that should uniquely identify the documentation
-# set bundle. This should be a reverse domain-name style string, e.g.
-# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_BUNDLE_ID       = org.doxygen.Project
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-# The default value is: org.doxygen.Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-# The default value is: Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_NAME  = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
-# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
-# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
-#
-# The HTML Help Workshop contains a compiler that can convert all HTML output
-# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
-# files have been the standard Windows help format since Windows 98, replacing
-# the old Windows help format (.hlp). Compressed HTML files also contain an
-# index and a table of contents, and allow searching for words in the
-# documentation. The HTML Help Workshop also contains a viewer for
-# compressed HTML files.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_HTMLHELP      = NO
-
-# The CHM_FILE tag can be used to specify the file name of the resulting .chm
-# file. You can add a path in front of the file if the result should not be
-# written to the html output directory.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_FILE               =
-
-# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler (hhc.exe). If non-empty,
-# doxygen will try to run the HTML help compiler on the generated index.hhp.
-# The file has to be specified with full path.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-HHC_LOCATION           =
-
-# The GENERATE_CHI flag controls whether a separate .chi index file is generated
-# (YES) or the index is included in the master .chm file (NO).
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-GENERATE_CHI           = NO
-
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
-# and project file content.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_INDEX_ENCODING     =
-
-# The BINARY_TOC flag controls whether a binary table of contents is generated
-# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
-# enables the Previous and Next buttons.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members to
-# the table of contents of the HTML help documentation and to the tree view.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
-# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
-# (.qch) of the generated HTML documentation.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
-# the file name of the resulting .qch file. The path specified is relative to
-# the HTML output folder.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
-# Project output. For more information please see Qt Help Project / Namespace
-# (see: http://doc.qt.io/qt-4.8/qthelpproject.html#namespace).
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_NAMESPACE          = org.doxygen.Project
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
-# Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: http://doc.qt.io/qt-4.8/qthelpproject.html#virtual-folders).
-# The default value is: doc.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
-# filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's filter section matches. For more information please see Qt Help
-# Project / Filter Attributes (see:
-# http://doc.qt.io/qt-4.8/qthelpproject.html#filter-attributes).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHG_LOCATION           =
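-
-# A minimal hypothetical Qt help setup (all names below are illustrative, not
-# part of this project's configuration):
-#
-# GENERATE_QHP       = YES
-# QHP_NAMESPACE      = org.example.phageterm
-# QHP_VIRTUAL_FOLDER = doc
-# QCH_FILE           = phageterm.qch
-# QHG_LOCATION       = /usr/bin/qhelpgenerator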
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
-# generated that, together with the HTML files, form an Eclipse help plugin. To
-# install this plugin and make it available under the help contents menu in
-# Eclipse, the contents of the directory containing the HTML and XML files need
-# to be copied into the plugins directory of Eclipse. The name of the directory
-# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
-# After copying, Eclipse needs to be restarted before the help appears.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the Eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have this
-# name. Each documentation set should have its own identifier.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
-
-ECLIPSE_DOC_ID         = org.doxygen.Project
-
-# If you want full control over the layout of the generated HTML pages it might
-# be necessary to disable the index and replace it with your own. The
-# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
-# of each HTML page. A value of NO enables the index and the value YES disables
-# it. Since the tabs in the index contain the same information as the navigation
-# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information. If the tag
-# value is set to YES, a side panel will be generated containing a tree-like
-# index structure (just like the one that is generated for HTML Help). For this
-# to work a browser that supports JavaScript, DHTML, CSS and frames is required
-# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
-# further fine-tune the look of the index. As an example, the default style
-# sheet generated by doxygen has an example that shows how to put an image at
-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
-# the same information as the tab index, you could consider setting
-# DISABLE_INDEX to YES when enabling this option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_TREEVIEW      = NO
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
-# doxygen will group on one line in the generated HTML documentation.
-#
-# Note that a value of 0 will completely suppress the enum values from appearing
-# in the overview section.
-# Minimum value: 0, maximum value: 20, default value: 4.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-ENUM_VALUES_PER_LINE   = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
-# to set the initial width (in pixels) of the frame in which the tree is shown.
-# Minimum value: 0, maximum value: 1500, default value: 250.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-TREEVIEW_WIDTH         = 250
-
-# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
-# external symbols imported via tag files in a separate window.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of LaTeX formulas included as images in
-# the HTML documentation. When you change the font size after a successful
-# doxygen run you need to manually remove any form_*.png images from the HTML
-# output directory to force them to be regenerated.
-# Minimum value: 8, maximum value: 50, default value: 10.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes take effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
-# https://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
-# installed or if you want the formulas to look prettier in the HTML output. When
-# enabled you may also need to install MathJax separately and configure the path
-# to it using the MATHJAX_RELPATH option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
-# Possible values are: HTML-CSS (which is slower, but has the best
-# compatibility), NativeMML (i.e. MathML) and SVG.
-# The default value is: HTML-CSS.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_FORMAT         = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the HTML
-# output directory using the MATHJAX_RELPATH option. The destination directory
-# should contain the MathJax.js script. For instance, if the mathjax directory
-# is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
-# Content Delivery Network so you can quickly see the result without installing
-# MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from https://www.mathjax.org before deployment.
-# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_RELPATH        = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
-# extension names that should be enabled during MathJax rendering. For example
-# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_EXTENSIONS     =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
-# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
-# example see the documentation.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_CODEFILE       =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
-# the HTML output. The underlying search engine uses javascript and DHTML and
-# should work on any modern browser. Note that when using HTML help
-# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
-# there is already a search function so this one should typically be disabled.
-# For large projects the javascript based search engine can be slow; in that
-# case enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
-# search using the keyboard; to jump to the search box use <access key> + S
-# (what the <access key> is depends on the OS and browser, but it is typically
-# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
-# key> to jump into the search results window, the results can be navigated
-# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
-# the search. The filter options can be selected when the cursor is inside the
-# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
-# to select a filter and <Enter> or <escape> to activate or cancel the filter
-# option.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-SEARCHENGINE           = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a web server instead of a web client using Javascript. There
-# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
-# setting. When disabled, doxygen will generate a PHP script for searching and
-# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
-# and searching needs to be provided by external tools. See the section
-# "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SERVER_BASED_SEARCH    = NO
-
-# When the EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
-# script for searching. Instead the search results are written to an XML file
-# which needs to be processed by an external indexer. Doxygen will invoke an
-# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
-# search results.
-#
-# Doxygen ships with an example indexer (doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: https://xapian.org/).
-#
-# See the section "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH        = NO
-
-# The SEARCHENGINE_URL should point to a search engine hosted by a web server
-# which will return the search results when EXTERNAL_SEARCH is enabled.
-#
-# Doxygen ships with an example indexer (doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: https://xapian.org/). See the section "External Indexing and
-# Searching" for details.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHENGINE_URL       =
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
-# search data is written to a file for indexing by an external tool. With the
-# SEARCHDATA_FILE tag the name of this file can be specified.
-# The default file is: searchdata.xml.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHDATA_FILE        = searchdata.xml
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
-# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
-# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
-# projects and redirect the results back to the right project.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH_ID     =
-
-# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
-# projects other than the one defined by this configuration file, but that are
-# all added to the same external search index. Each project needs to have a
-# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of a
-# project to a relative location where the documentation can be found. The format is:
-# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTRA_SEARCH_MAPPINGS  =
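-
-# A hypothetical two-project mapping (ids and locations are made up): with this
-# project indexed under the id "phageterm" and a companion project indexed
-# under "utils", served from sibling directories:
-#
-# EXTRA_SEARCH_MAPPINGS = phageterm=../phageterm/html utils=../utils/html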
-
-#---------------------------------------------------------------------------
-# Configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
-# The default value is: YES.
-
-GENERATE_LATEX         = YES
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: latex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_OUTPUT           = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked.
-#
-# Note that when USE_PDFLATEX is disabled the default is latex, and when
-# USE_PDFLATEX is enabled the default is pdflatex; if latex is chosen in the
-# latter case it is overridden by pdflatex. For specific output languages the
-# default may have been set differently; this depends on the implementation of
-# the output language.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_CMD_NAME         =
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
-# index for LaTeX.
-# The default file is: makeindex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-MAKEINDEX_CMD_NAME     = makeindex
-
-# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-COMPACT_LATEX          = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used by the
-# printer.
-# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
-# 14 inches) and executive (7.25 x 10.5 inches).
-# The default value is: a4.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PAPER_TYPE             = a4
-
-# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
-# that should be included in the LaTeX output. The package can be specified just
-# by its name or with the correct syntax as to be used with the LaTeX
-# \usepackage command. To get the times font for instance you can specify:
-# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
-# To use the option intlimits with the amsmath package you can specify:
-# EXTRA_PACKAGES=[intlimits]{amsmath}
-# If left blank no extra packages will be included.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-EXTRA_PACKAGES         =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
-# generated LaTeX document. The header should contain everything until the first
-# chapter. If it is left blank doxygen will generate a standard header. See
-# section "Doxygen usage" for information on how to let doxygen write the
-# default header to a separate file.
-#
-# Note: Only use a user-defined header if you know what you are doing! The
-# following commands have a special meaning inside the header: $title,
-# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
-# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
-# string; for the replacement values of the other commands the user is referred
-# to HTML_HEADER.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HEADER           =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
-# generated LaTeX document. The footer should contain everything after the last
-# chapter. If it is left blank doxygen will generate a standard footer. See
-# LATEX_HEADER for more information on how to generate a default footer and what
-# special commands can be used inside the footer.
-#
-# Note: Only use a user-defined footer if you know what you are doing!
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_FOOTER           =
-
-# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
-# LaTeX style sheets that are included after the standard style sheets created
-# by doxygen. Using this option one can overrule certain style aspects. Doxygen
-# will copy the style sheet files to the output directory.
-# Note: The order of the extra style sheet files is of importance (e.g. the last
-# style sheet in the list overrules the setting of the previous ones in the
-# list).
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_EXTRA_STYLESHEET =
-
-# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the LATEX_OUTPUT output
-# directory. Note that the files will be copied as-is; there are no commands or
-# markers available.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_EXTRA_FILES      =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
-# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
-# contain links (just like the HTML output) instead of page references. This
-# makes the output suitable for online browsing using a PDF viewer.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PDF_HYPERLINKS         = YES
-
-# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES, to get a
-# higher quality PDF documentation.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-USE_PDFLATEX           = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep running
-# if errors occur, instead of asking the user for help. This option is also used
-# when generating formulas in HTML.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BATCHMODE        = NO
-
-# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
-# index chapters (such as File Index, Compound Index, etc.) in the output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HIDE_INDICES     = NO
-
-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
-# code with syntax highlighting in the LaTeX output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_SOURCE_CODE      = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. See
-# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
-# The default value is: plain.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BIB_STYLE        = plain
-
-# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
-# page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_TIMESTAMP        = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
-# RTF output is optimized for Word 97 and may not look too pretty with other RTF
-# readers/editors.
-# The default value is: NO.
-
-GENERATE_RTF           = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: rtf.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_OUTPUT             = rtf
-
-# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-COMPACT_RTF            = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
-# contain hyperlink fields. The RTF file will contain links (just like the HTML
-# output) instead of page references. This makes the output suitable for online
-# browsing using Word or some other Word compatible readers that support those
-# fields.
-#
-# Note: WordPad (write) and others do not support links.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_HYPERLINKS         = NO
-
-# Load stylesheet definitions from file. Syntax is similar to doxygen's
-# configuration file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-#
-# See also section "Doxygen usage" for information on how to generate the
-# default style sheet that doxygen normally uses.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_STYLESHEET_FILE    =
-
-# Set optional variables used in the generation of an RTF document. Syntax is
-# similar to doxygen's configuration file. A template extensions file can be
-# generated using doxygen -e rtf extensionFile.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_EXTENSIONS_FILE    =
-
-# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
-# with syntax highlighting in the RTF output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_SOURCE_CODE        = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
-# classes and files.
-# The default value is: NO.
-
-GENERATE_MAN           = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it. A directory man3 will be created inside the directory specified by
-# MAN_OUTPUT.
-# The default directory is: man.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_OUTPUT             = man
-
-# The MAN_EXTENSION tag determines the extension that is added to the generated
-# man pages. In case the manual section does not start with a number, the number
-# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
-# optional.
-# The default value is: .3.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_EXTENSION          = .3
-
-# The MAN_SUBDIR tag determines the name of the directory created within
-# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
-# MAN_EXTENSION with the initial . removed.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_SUBDIR             =
-
-# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
-# will generate one additional man file for each entity documented in the real
-# man page(s). These additional files only source the real man page, but without
-# them the man command would be unable to find the correct page.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_LINKS              = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
-# captures the structure of the code including all documentation.
-# The default value is: NO.
-
-GENERATE_XML           = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: xml.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_OUTPUT             = xml
-
-# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
-# listings (including syntax highlighting and cross-referencing information) to
-# the XML output. Note that enabling this will significantly increase the size
-# of the XML output.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_PROGRAMLISTING     = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the DOCBOOK output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
-# that can be used to generate PDF.
-# The default value is: NO.
-
-GENERATE_DOCBOOK       = NO
-
-# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
-# front of it.
-# The default directory is: docbook.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_OUTPUT         = docbook
-
-# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
-# program listings (including syntax highlighting and cross-referencing
-# information) to the DOCBOOK output. Note that enabling this will significantly
-# increase the size of the DOCBOOK output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_PROGRAMLISTING = NO
-
-#---------------------------------------------------------------------------
-# Configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
-# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
-# the structure of the code including all documentation. Note that this feature
-# is still experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_AUTOGEN_DEF   = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
-# file that captures the structure of the code including all documentation.
-#
-# Note that this feature is still experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_PERLMOD       = NO
-
-# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
-# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
-# output from the Perl module output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_LATEX          = NO
-
-# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
-# formatted so it can be parsed by a human reader. This is useful if you want to
-# understand what is going on. On the other hand, if this tag is set to NO, the
-# size of the Perl module output will be much smaller and Perl will parse it
-# just the same.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_PRETTY         = YES
-
-# The names of the make variables in the generated doxyrules.make file are
-# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
-# so different doxyrules.make files included by the same Makefile don't
-# overwrite each other's variables.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
-# C-preprocessor directives found in the sources and include files.
-# The default value is: YES.
-
-ENABLE_PREPROCESSING   = YES
-
-# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
-# in the source code. If set to NO, only conditional compilation will be
-# performed. Macro expansion can be done in a controlled way by setting
-# EXPAND_ONLY_PREDEF to YES.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-MACRO_EXPANSION        = NO
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
-# the macro expansion is limited to the macros specified with the PREDEFINED and
-# EXPAND_AS_DEFINED tags.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-EXPAND_ONLY_PREDEF     = NO
-
-# If the SEARCH_INCLUDES tag is set to YES, the include files in the
-# INCLUDE_PATH will be searched if a #include is found.
-# The default value is: YES.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-SEARCH_INCLUDES        = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by the
-# preprocessor.
-# This tag requires that the tag SEARCH_INCLUDES is set to YES.
-
-INCLUDE_PATH           =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will be
-# used.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-INCLUDE_FILE_PATTERNS  =
-
-# The PREDEFINED tag can be used to specify one or more macro names that are
-# defined before the preprocessor is started (similar to the -D option of e.g.
-# gcc). The argument of the tag is a list of macros of the form: name or
-# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
-# is assumed. To prevent a macro definition from being undefined via #undef or
-# recursively expanded use the := operator instead of the = operator.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-PREDEFINED             =
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
-# tag can be used to specify a list of macro names that should be expanded. The
-# macro definition that is found in the sources will be used. Use the PREDEFINED
-# tag if you want to use a different macro definition that overrules the
-# definition found in the source code.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-EXPAND_AS_DEFINED      =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
-# remove all references to function-like macros that are alone on a line, have
-# an all uppercase name, and do not end with a semicolon. Such function macros
-# are typically used for boiler-plate code, and will confuse the parser if not
-# removed.
-# The default value is: YES.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-SKIP_FUNCTION_MACROS   = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES tag can be used to specify one or more tag files. For each tag
-# file the location of the external documentation should be added. The format of
-# a tag file without this location is as follows:
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where loc1 and loc2 can be relative or absolute paths or URLs. See the
-# section "Linking to external documentation" for more information about the use
-# of tag files.
-# Note: Each tag file must have a unique name (where the name does NOT include
-# the path). If a tag file is not located in the directory in which doxygen is
-# run, you must also specify the path to the tagfile here.
-
-TAGFILES               =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
-# tag file that is based on the input files it reads. See section "Linking to
-# external documentation" for more information about the usage of tag files.
-
-GENERATE_TAGFILE       =
-
-# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
-# the class index. If set to NO, only the inherited external classes will be
-# listed.
-# The default value is: NO.
-
-ALLEXTERNALS           = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will be
-# listed.
-# The default value is: YES.
-
-EXTERNAL_GROUPS        = YES
-
-# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
-# the related pages index. If set to NO, only the current project's pages will
-# be listed.
-# The default value is: YES.
-
-EXTERNAL_PAGES         = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of 'which perl').
-# The default file (with absolute path) is: /usr/bin/perl.
-
-PERL_PATH              = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
-# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
-# NO turns the diagrams off. Note that this option also works with HAVE_DOT
-# disabled, but it is recommended to install and use dot, since it yields more
-# powerful graphs.
-# The default value is: YES.
-
-CLASS_DIAGRAMS         = YES
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see:
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH            =
-
-# You can include diagrams made with dia in doxygen documentation. Doxygen will
-# then run dia to produce the diagram and insert it in the documentation. The
-# DIA_PATH tag allows you to specify the directory where the dia binary resides.
-# If left empty dia is assumed to be found in the default search path.
-
-DIA_PATH               =
-
-# If set to YES the inheritance and collaboration graphs will hide inheritance
-# and usage relations if the target is undocumented or is not a class.
-# The default value is: YES.
-
-HIDE_UNDOC_RELATIONS   = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz (see:
-# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
-# Bell Labs. The other options in this section have no effect if this option is
-# set to NO.
-# The default value is: NO.
-
-HAVE_DOT               = NO
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
-# to run in parallel. When set to 0 doxygen will base this on the number of
-# processors available in the system. You can set it explicitly to a value
-# larger than 0 to get control over the balance between CPU load and processing
-# speed.
-# Minimum value: 0, maximum value: 32, default value: 0.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_NUM_THREADS        = 0
-
-# When you want a differently looking font in the dot files that doxygen
-# generates you can specify the font name using DOT_FONTNAME. You need to make
-# sure dot is able to find the font, which can be done by putting it in a
-# standard location or by setting the DOTFONTPATH environment variable or by
-# setting DOT_FONTPATH to the directory containing the font.
-# The default value is: Helvetica.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTNAME           = Helvetica
-
-# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
-# dot graphs.
-# Minimum value: 4, maximum value: 24, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTSIZE           = 10
-
-# By default doxygen will tell dot to use the default font as specified with
-# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
-# the path where dot can find it using this tag.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTPATH           =
-
-# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
-# each documented class showing the direct and indirect inheritance relations.
-# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CLASS_GRAPH            = YES
-
-# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
-# graph for each documented class showing the direct and indirect implementation
-# dependencies (inheritance, containment, and class references variables) of the
-# class with other documented classes.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-COLLABORATION_GRAPH    = YES
-
-# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
-# groups, showing the direct groups dependencies.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GROUP_GRAPHS           = YES
-
-# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-UML_LOOK               = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
-# class node. If there are many fields or methods and many nodes the graph may
-# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
-# number of items for each type to make the size more manageable. Set this to 0
-# for no limit. Note that the threshold may be exceeded by 50% before the limit
-# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
-# but if the number exceeds 15, the total amount of fields shown is limited to
-# 10.
-# Minimum value: 0, maximum value: 100, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-UML_LIMIT_NUM_FIELDS   = 10
-
-# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
-# collaboration graphs will show the relations between templates and their
-# instances.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-TEMPLATE_RELATIONS     = NO
-
-# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
-# YES then doxygen will generate a graph for each documented file showing the
-# direct and indirect include dependencies of the file with other documented
-# files.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INCLUDE_GRAPH          = YES
-
-# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
-# set to YES then doxygen will generate a graph for each documented file showing
-# the direct and indirect include dependencies of the file with other documented
-# files.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INCLUDED_BY_GRAPH      = YES
-
-# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
-# dependency graph for every global function or class method.
-#
-# Note that enabling this option will significantly increase the time of a run.
-# So in most cases it will be better to enable call graphs for selected
-# functions only using the \callgraph command. Disabling a call graph can be
-# accomplished by means of the command \hidecallgraph.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CALL_GRAPH             = NO
-
-# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
-# dependency graph for every global function or class method.
-#
-# Note that enabling this option will significantly increase the time of a run.
-# So in most cases it will be better to enable caller graphs for selected
-# functions only using the \callergraph command. Disabling a caller graph can be
-# accomplished by means of the command \hidecallergraph.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CALLER_GRAPH           = NO
-
-# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
-# graphical hierarchy of all classes instead of a textual one.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GRAPHICAL_HIERARCHY    = YES
-
-# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
-# dependencies a directory has on other directories in a graphical way. The
-# dependency relations are determined by the #include relations between the
-# files in the directories.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DIRECTORY_GRAPH        = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. For an explanation of the image formats see the section
-# output formats in the documentation of the dot tool (Graphviz (see:
-# http://www.graphviz.org/)).
-# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
-# to make the SVG files visible in IE 9+ (other browsers do not have this
-# requirement).
-# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
-# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
-# png:gdiplus:gdiplus.
-# The default value is: png.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_IMAGE_FORMAT       = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-#
-# Note that this requires a modern browser other than Internet Explorer. Tested
-# and working are Firefox, Chrome, Safari, and Opera.
-# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
-# the SVG files visible. Older versions of IE do not have SVG support.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INTERACTIVE_SVG        = NO
-
-# The DOT_PATH tag can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_PATH               =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the \dotfile
-# command).
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOTFILE_DIRS           =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the \mscfile
-# command).
-
-MSCFILE_DIRS           =
-
-# The DIAFILE_DIRS tag can be used to specify one or more directories that
-# contain dia files that are included in the documentation (see the \diafile
-# command).
-
-DIAFILE_DIRS           =
-
-# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
-# path where java can find the plantuml.jar file. If left blank, it is assumed
-# PlantUML is not used or called during a preprocessing step. Doxygen will
-# generate a warning when it encounters a \startuml command in this case and
-# will not generate output for the diagram.
-
-PLANTUML_JAR_PATH      =
-
-# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
-# configuration file for plantuml.
-
-PLANTUML_CFG_FILE      =
-
-# When using plantuml, the specified paths are searched for files specified by
-# the !include statement in a plantuml block.
-
-PLANTUML_INCLUDE_PATH  =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
-# that will be shown in the graph. If the number of nodes in a graph becomes
-# larger than this value, doxygen will truncate the graph, which is visualized
-# by representing a node as a red box. Note that if the number of direct
-# children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES, the graph will not be shown at all. Also note that
-# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-# Minimum value: 0, maximum value: 10000, default value: 50.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_GRAPH_MAX_NODES    = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
-# generated by dot. A depth value of 3 means that only nodes reachable from the
-# root by following a path via at most 3 edges will be shown. Nodes that lie
-# farther from the root node will be omitted. Note that setting this option to 1
-# or 2 may greatly reduce the computation time needed for large code bases. Also
-# note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-# Minimum value: 0, maximum value: 1000, default value: 0.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-MAX_DOT_GRAPH_DEPTH    = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not seem
-# to support this out of the box.
-#
-# Warning: Depending on the platform used, enabling this option may lead to
-# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
-# read).
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_TRANSPARENT        = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10) support
-# this, this feature is disabled by default.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_MULTI_TARGETS      = NO
-
-# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
-# explaining the meaning of the various boxes and arrows in the dot generated
-# graphs.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GENERATE_LEGEND        = YES
-
-# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
-# files that are used to generate the various graphs.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_CLEANUP            = YES
diff --git a/phageterm/SeqStats.py b/phageterm/SeqStats.py
deleted file mode 100644
index e811c0588d9dbaadb1af7788c1ea52ed81081bba..0000000000000000000000000000000000000000
--- a/phageterm/SeqStats.py
+++ /dev/null
@@ -1,93 +0,0 @@
-##@file SeqStats.py
-#
-# Utility class to store results (statistics) for a sequence once all coverage results have been processed for it
-
-class SeqStats:
-    def __init__(self,P_class, P_left, P_right, P_type, P_orient, ave_whole_cov, phage_plus_norm, phage_minus_norm, ArtcohesiveSeq,\
-                 P_seqcoh, Redundant, Mu_like, added_whole_coverage, Permuted, termini_coverage_norm_close, picMaxPlus_norm_close, \
-                 picMaxMinus_norm_close, gen_len, termini_coverage_close,ArtPackmode, termini, forward, reverse, ArtOrient, \
-                 picMaxPlus_close, picMaxMinus_close, picOUT_norm_forw, picOUT_norm_rev, picOUT_forw, picOUT_rev, \
-                 lost_perc, R1, R2, R3, picMaxPlus_host, picMaxMinus_host, drop_cov, added_paired_whole_coverage, P_concat):
-        self.P_class=P_class # TODO: some information about the meaning of these fields would be welcome.
-        self.P_left=P_left
-        self.P_right=P_right
-        self.P_type=P_type
-        self.P_orient=P_orient
-        self.ave_whole_cov=ave_whole_cov
-        self.phage_plus_norm=phage_plus_norm
-        self.phage_minus_norm=phage_minus_norm
-        self.ArtcohesiveSeq=ArtcohesiveSeq
-        self.P_seqcoh=P_seqcoh
-        self.Redundant=Redundant
-        self.Mu_like=Mu_like
-        self.added_whole_coverage=added_whole_coverage
-        self.Permuted=Permuted
-        self.termini_coverage_norm_close=termini_coverage_norm_close
-        self.picMaxPlus_norm_close=picMaxPlus_norm_close
-        self.picMaxMinus_norm_close=picMaxMinus_norm_close
-        self.gen_len=gen_len
-        self.termini_coverage_close=termini_coverage_close
-        self.ArtPackmode=ArtPackmode
-        self.termini=termini
-        self.forward=forward
-        self.reverse=reverse
-        self.ArtOrient=ArtOrient
-        self.picMaxPlus_close=picMaxPlus_close
-        self.picMaxMinus_close=picMaxMinus_close
-        self.picOUT_norm_forw=picOUT_norm_forw
-        self.picOUT_norm_rev=picOUT_norm_rev
-        self.picOUT_forw=picOUT_forw
-        self.picOUT_rev=picOUT_rev
-        self.lost_perc=lost_perc
-        self.R1=R1
-        self.R2=R2
-        self.R3=R3
-        self.picMaxPlus_host=picMaxPlus_host
-        self.picMaxMinus_host=picMaxMinus_host
-        self.drop_cov=drop_cov
-        self.added_paired_whole_coverage=added_paired_whole_coverage
-        self.P_concat=P_concat
-
-    def toFile(self,ficname): #TODO: implement me
-        pass
-
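-# The TODO above is left unimplemented in this version. A minimal sketch of
-# what toFile could look like (hypothetical, not the project's actual method),
-# assuming a plain "name<TAB>value" text dump is acceptable:
-#
-#    def toFile(self, ficname):
-#        with open(ficname, "w") as f:
-#            for name, value in sorted(vars(self).items()):
-#                f.write("{0}\t{1}\n".format(name, value))
-#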
-# Types of the constructor arguments, in order:
-# P_class: str
-# P_left: numpy.int64
-# P_right: numpy.int64
-# P_type: str
-# P_orient: str
-# ave_whole_cov: float
-# phage_plus_norm: pandas.core.frame.DataFrame
-# phage_minus_norm: pandas.core.frame.DataFrame
-# ArtcohesiveSeq: str
-# P_seqcoh: str
-# Redundant: int
-# Mu_like: int
-# added_whole_coverage: list
-# Permuted: str
-# termini_coverage_norm_close: list
-# picMaxPlus_norm_close: list
-# picMaxMinus_norm_close: list
-# gen_len: int
-# termini_coverage_close: list
-# ArtPackmode: str
-# termini: str
-# forward: str
-# reverse: str
-# ArtOrient: str
-# picMaxPlus_close: list
-# picMaxMinus_close: list
-# picOUT_norm_forw: list
-# picOUT_norm_rev: list
-# picOUT_forw: list
-# picOUT_rev: list
-# lost_perc: float
-# R1: float
-# R2: float
-# R3: float
-# picMaxPlus_host: str
-# picMaxMinus_host: str
-# drop_cov: list
-# added_paired_whole_coverage: list
-# P_concat: str
diff --git a/phageterm/__init__.py b/phageterm/__init__.py
deleted file mode 100644
index fc80254b619d488138a43632b617124a3d324702..0000000000000000000000000000000000000000
--- a/phageterm/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-pass
\ No newline at end of file
diff --git a/phageterm/debug_utils.py b/phageterm/debug_utils.py
deleted file mode 100644
index edb489c8aee8b18b01b802b32a16016be0cbc0bf..0000000000000000000000000000000000000000
--- a/phageterm/debug_utils.py
+++ /dev/null
@@ -1,87 +0,0 @@
-##@file debug_utils.py
-#
-# Contains utility classes for debugging and testing.
-#
-#@author vegrand@pasteur.fr
-
-## Utility class for debugging.
-#
-# Contains the mapping results for 1 read.
-# For map_start, map_end, map_rcpl_start and map_rcpl_end, a value of 0 means that no match was found and a value of 1 means that a match was found.
-class ReadMappingInfo:
-    ##
-    #
-    # @param idx_read Number of the read in the processing (reads are processed in the same order as they are found in the fasta file).
-    # @param map_start Read maps at its beginning (first seed characters) or not.
-    # @param map_end Read maps at its end (last seed characters) or not.
-    # @param map_rcpl_start Start of reverse complement maps or not.
-    # @param map_rcpl_stop End of reverse complement maps or not (stored as self.map_rcpl_end).
-    def __init__(self,idx_read,map_start,map_end,map_rcpl_start,map_rcpl_stop):
-        self.idx_read=idx_read
-        self.map_start=map_start
-        self.map_end=map_end
-        self.map_rcpl_start=map_rcpl_start
-        self.map_rcpl_end=map_rcpl_stop
-
-
-
-
-
-## The aim of this class is to make it possible to compare the results of readsCoverage (original CPU version) and readsCoverageGPU.
-class ReadMappingInfoLogger:
-    ##
-    #
-    # @param cnt_read count only reads that were not rejected (readlen >= seed)
-    # @param l_rm_info list of ReadMappingInfo objects.
-    # @param cur_r_info ReadMappingInfo for the read that is currently being processed.
-    def __init__(self):
-        self.cnt_read = 0 # count only reads that were not rejected (readlen >= seed)
-        self.l_rm_info=[]
-        self.cur_r_info=None
-        self.rw_lst = []
-
-    def add_rw(self, rw):
-        self.rw_lst.append(rw)
-
-    def newRmInfo(self,numR_in_file=None):
-        if self.cur_r_info is not None:
-            self.l_rm_info.append(self.cur_r_info)
-        if numR_in_file is not None:
-            idx_read=numR_in_file
-        else:
-            idx_read=self.cnt_read
-        self.cur_r_info=ReadMappingInfo(idx_read,0,0,0,0)
-        self.cnt_read+=1
-
-    ## Records the mapping information (does it map or not and where) for the read that is currently being processed.
-    def rMatch(self,akey):
-        if self.cur_r_info is None:
-            raise RuntimeError("Call newRmInfo() before calling rMatch()")
-        if akey=="mstart":
-            self.cur_r_info.map_start = 1
-        elif akey=="mend":
-            self.cur_r_info.map_end=1
-        elif akey=="mrcplstart":
-            self.cur_r_info.map_rcpl_start=1
-        elif akey=="mrcplend":
-            self.cur_r_info.map_rcpl_end=1
-        else:
-            raise RuntimeError("invalid key to indicate where read matches sequence")
-
-    def getMatchInfoList(self):
-        if self.cur_r_info is not None:
-            self.l_rm_info.append(self.cur_r_info)
-        return self.l_rm_info
-
-    ## Flushes all ReadMappingInfo to the given file.
-    def flush(self,filename):
-        self.f_debug = open(filename, "w")
-        if self.cur_r_info is not None:
-            self.l_rm_info.append(self.cur_r_info)
-        self.f_debug.write(str(self.cnt_read)+"\n") # write() expects a string, not an int
-        for elm in self.l_rm_info:
-            # the stored attribute is map_rcpl_end; write one record per line
-            my_str=str(elm.idx_read)+"|"+str(elm.map_start)+"|"+str(elm.map_end)+"|"+str(elm.map_rcpl_start)+"|"+str(elm.map_rcpl_end)+"\n"
-            self.f_debug.write(my_str)
-        self.f_debug.close()
-
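-# Intended call sequence (a usage sketch, not part of the original file):
-# one newRmInfo() per read, one rMatch() per place the read maps, then flush.
-#
-#    logger = ReadMappingInfoLogger()
-#    logger.newRmInfo()        # start recording the first read
-#    logger.rMatch("mstart")   # the read maps at its beginning
-#    logger.rMatch("mrcplend") # its reverse complement maps at its end
-#    logger.flush("rm_info_debug.txt")
-#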
diff --git a/phageterm/generate_report.py b/phageterm/generate_report.py
deleted file mode 100644
index abd73fa1f3ae631fc39b9d2b0803c562f1cc7a28..0000000000000000000000000000000000000000
--- a/phageterm/generate_report.py
+++ /dev/null
@@ -1,158 +0,0 @@
-from __future__ import print_function
-import os
-import pickle
-from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image, Table, TableStyle, PageBreak
-from reportlab.lib.pagesizes import letter, landscape
-from _modules.functions_PhageTerm import SummaryReport,WorkflowReport,ExportCohesiveSeq,ExportPhageSequence,CreateReport
-
-
-def loadDR(DR_path,DR):
-    for d in os.listdir(DR_path): # iterate over P_class subdirectories.
-        if not os.path.isdir(os.path.join(DR_path,d)):
-            err_str=DR_path+" should contain only directories."
-            raise RuntimeError(err_str)
-        for fic_name in os.listdir(os.path.join(DR_path,d)): # iterate over all files for a given P_class
-            p=os.path.join(DR_path,d)
-            fname=os.path.join(p,fic_name)
-            with open(fname, 'rb') as f:
-                loaded_items=pickle.load(f)
-                # d is P_class name, fic_name is phagename.
-                dict_tmp=dict()
-                dict_tmp["phagename"]=loaded_items[0]
-                dict_tmp["seed"]=loaded_items[1]
-                dict_tmp["added_whole_coverage"]=loaded_items[2]
-                dict_tmp["Redundant"]=loaded_items[3]
-                dict_tmp["P_left"]=loaded_items[4]
-                print("P_left=",dict_tmp["P_left"],type(dict_tmp["P_left"]))
-                dict_tmp["P_right"] = loaded_items[5]
-                print("P_right=",dict_tmp["P_right"],type(dict_tmp["P_right"]))
-                dict_tmp["Permuted"]=loaded_items[6]
-                dict_tmp["P_orient"] =loaded_items[7]
-                dict_tmp["termini_coverage_norm_close"] =loaded_items[8]
-                dict_tmp["picMaxPlus_norm_close"] =loaded_items[9]
-                dict_tmp["picMaxMinus_norm_close"] =loaded_items[10]
-                dict_tmp["gen_len"] =loaded_items[11]
-                dict_tmp["tot_reads"] =loaded_items[12]
-                dict_tmp["P_seqcoh"] =loaded_items[13]
-                dict_tmp["phage_plus_norm"] =loaded_items[14]
-                dict_tmp["phage_minus_norm"] =loaded_items[15]
-                dict_tmp["ArtPackmode"] = loaded_items[16]
-                dict_tmp["termini"] = loaded_items[17]
-                dict_tmp["forward"] = loaded_items[18]
-                dict_tmp["reverse"] = loaded_items[19]
-                dict_tmp["ArtOrient"] = loaded_items[20]
-                dict_tmp["ArtcohesiveSeq"] = loaded_items[21]
-                dict_tmp["termini_coverage_close"] = loaded_items[22]
-                dict_tmp["picMaxPlus_close"] = loaded_items[23]
-                dict_tmp["picMaxMinus_close"] = loaded_items[24]
-                dict_tmp["picOUT_norm_forw"] = loaded_items[25]
-                dict_tmp["picOUT_norm_rev"] = loaded_items[26]
-                dict_tmp["picOUT_forw"] = loaded_items[27]
-                dict_tmp["picOUT_rev"] = loaded_items[28]
-                dict_tmp["lost_perc"] = loaded_items[29]
-                dict_tmp["ave_whole_cov"] = loaded_items[30]
-                dict_tmp["R1"] = loaded_items[31]
-                dict_tmp["R2"] = loaded_items[32]
-                dict_tmp["R3"] = loaded_items[33]
-                dict_tmp["host"] = loaded_items[34]
-                dict_tmp["host_len"] = loaded_items[35]
-                dict_tmp["host_whole_coverage"] = loaded_items[36]
-                dict_tmp["picMaxPlus_host"] = loaded_items[37]
-                dict_tmp["picMaxMinus_host"] = loaded_items[38]
-                dict_tmp["surrounding"] = loaded_items[39]
-                dict_tmp["drop_cov"] = loaded_items[40]
-                dict_tmp["paired"] = loaded_items[41]
-                dict_tmp["insert"] = loaded_items[42]
-                dict_tmp["phage_hybrid_coverage"] = loaded_items[43]
-                dict_tmp["host_hybrid_coverage"] = loaded_items[44]
-                dict_tmp["added_paired_whole_coverage"] = loaded_items[45]
-                dict_tmp["Mu_like"] = loaded_items[46]
-                dict_tmp["test_run"] = loaded_items[47]
-                dict_tmp["P_class"] = loaded_items[48]
-                dict_tmp["P_type"] = loaded_items[49]
-                dict_tmp["P_concat"] = loaded_items[50]
-                dict_tmp["idx_refseq_in_list"] = loaded_items[51]
-                DR[d][fic_name]=dict_tmp  # the "with" block closes f; no explicit close() needed
-
-
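-# Note on loadDR above: the 52-item positional unpacking must stay in sync
-# with whatever wrote the pickle. A sketch of a less fragile, equivalent
-# formulation (hypothetical; FIELDS is an invented name and the "..." elides
-# the remaining keys, which must follow the same pickle order):
-#
-#    FIELDS = ("phagename", "seed", "added_whole_coverage", "Redundant",
-#              "P_left", "P_right", "Permuted", "P_orient", ...)
-#    dict_tmp = dict(zip(FIELDS, loaded_items))
-#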
-
-
-def genReport(fParms,inDArgs,inRawDArgs,no_match,DR):
-    # Test No Match
-    if len(no_match) == inDArgs.nbr_virome:
-        print("\n\nERROR: No reads match, please check your reference file.")
-        exit()
-
-    # Report summary
-    multiReport = SummaryReport(inRawDArgs.analysis_name, DR, no_match)
-    multiCohSeq = ""
-    multiPhageSeq = ""
-    multiWorkflow = "#phagename\tClass\tLeft\tRight\tType\tOrient\tCoverage\tComments\n"
-
-    # No Match in workflow
-    if fParms.workflow:
-        for no_match_contig in no_match:
-            multiWorkflow += WorkflowReport(no_match_contig, "-", "-", "-", "-", "-", 0, 1)
-
-    for DPC in DR:
-        for DC in DR[DPC]:
-            # Text report
-            if fParms.workflow: # phagename, P_class, P_left, P_right, P_type, P_orient, ave_whole_cov, multi = 0
-                multiWorkflow += WorkflowReport(DC, DR[DPC][DC]["P_class"], DR[DPC][DC]["P_left"],
-                                                DR[DPC][DC]["P_right"],
-                                                DR[DPC][DC]["P_type"], DR[DPC][DC]["P_orient"],
-                                                DR[DPC][DC]["ave_whole_cov"], 1,DR[DPC][DC]["phage_plus_norm"],
-                                                DR[DPC][DC]["phage_minus_norm"])
-
-            # Sequence
-            idx_refseq = DR[DPC][DC]["idx_refseq_in_list"]
-            refseq = inDArgs.refseq_liste[idx_refseq]
-            multiCohSeq += ExportCohesiveSeq(DC, DR[DPC][DC]["ArtcohesiveSeq"], DR[DPC][DC]["P_seqcoh"], fParms.test_run, 1)
-            multiPhageSeq += ExportPhageSequence(DC, DR[DPC][DC]["P_left"], DR[DPC][DC]["P_right"], refseq,
-                                                 DR[DPC][DC]["P_orient"], DR[DPC][DC]["Redundant"], DR[DPC][DC]["Mu_like"],
-                                                 DR[DPC][DC]["P_class"], DR[DPC][DC]["P_seqcoh"], fParms.test_run, 1)
-
-            # Report
-            draw=0 # TODO VL: ask what is the use of this parameter that is always 0...
-            multiReport = CreateReport(DC, DR[DPC][DC]["seed"], DR[DPC][DC]["added_whole_coverage"], draw,
-                                       DR[DPC][DC]["Redundant"], DR[DPC][DC]["P_left"], DR[DPC][DC]["P_right"],
-                                       DR[DPC][DC]["Permuted"], DR[DPC][DC]["P_orient"],
-                                       DR[DPC][DC]["termini_coverage_norm_close"], DR[DPC][DC]["picMaxPlus_norm_close"],
-                                       DR[DPC][DC]["picMaxMinus_norm_close"], DR[DPC][DC]["gen_len"],
-                                       DR[DPC][DC]["tot_reads"], DR[DPC][DC]["P_seqcoh"], DR[DPC][DC]["phage_plus_norm"],
-                                       DR[DPC][DC]["phage_minus_norm"], DR[DPC][DC]["ArtPackmode"], DR[DPC][DC]["termini"],
-                                       DR[DPC][DC]["forward"], DR[DPC][DC]["reverse"], DR[DPC][DC]["ArtOrient"],
-                                       DR[DPC][DC]["ArtcohesiveSeq"], DR[DPC][DC]["termini_coverage_close"],
-                                       DR[DPC][DC]["picMaxPlus_close"], DR[DPC][DC]["picMaxMinus_close"],
-                                       DR[DPC][DC]["picOUT_norm_forw"], DR[DPC][DC]["picOUT_norm_rev"],
-                                       DR[DPC][DC]["picOUT_forw"], DR[DPC][DC]["picOUT_rev"], DR[DPC][DC]["lost_perc"],
-                                       DR[DPC][DC]["ave_whole_cov"], DR[DPC][DC]["R1"], DR[DPC][DC]["R2"],
-                                       DR[DPC][DC]["R3"], DR[DPC][DC]["host"], DR[DPC][DC]["host_len"],
-                                       DR[DPC][DC]["host_whole_coverage"], DR[DPC][DC]["picMaxPlus_host"],
-                                       DR[DPC][DC]["picMaxMinus_host"], DR[DPC][DC]["surrounding"], DR[DPC][DC]["drop_cov"],
-                                       DR[DPC][DC]["paired"], DR[DPC][DC]["insert"], DR[DPC][DC]["phage_hybrid_coverage"],
-                                       DR[DPC][DC]["host_hybrid_coverage"], DR[DPC][DC]["added_paired_whole_coverage"],
-                                       DR[DPC][DC]["Mu_like"], fParms.test_run, DR[DPC][DC]["P_class"],
-                                       DR[DPC][DC]["P_type"], DR[DPC][DC]["P_concat"], 1, multiReport)
-
-    # Workflow
-    if not fParms.test:
-        if fParms.workflow:
-            filoutWorkflow = open(inRawDArgs.analysis_name + "_workflow.txt", "w")
-            filoutWorkflow.write(multiWorkflow)
-            filoutWorkflow.close()
-
-        # Concatenate sequences
-        filoutCohSeq = open(inRawDArgs.analysis_name + "_cohesive-sequence.fasta", "w")
-        filoutCohSeq.write(multiCohSeq)
-        filoutCohSeq.close()
-
-        filoutPhageSeq = open(inRawDArgs.analysis_name + "_sequence.fasta", "w")
-        filoutPhageSeq.write(multiPhageSeq)
-        filoutPhageSeq.close()
-
-    # Concatenate report
-    doc = SimpleDocTemplate("%s_PhageTerm_report.pdf" % inRawDArgs.analysis_name, pagesize=letter, rightMargin=10,
-                            leftMargin=10, topMargin=5, bottomMargin=10)
-    doc.build(multiReport)
diff --git a/phageterm/readsCoverage_res.py b/phageterm/readsCoverage_res.py
deleted file mode 100644
index dfea8d0a22e3b362f51d8529d915f82cc35f504a..0000000000000000000000000000000000000000
--- a/phageterm/readsCoverage_res.py
+++ /dev/null
@@ -1,311 +0,0 @@
-##@file readsCoverage_res.py
-# Compact structure to store partial results of readsCoverage for later processing; used in multi-machine mode and for checkpoints.
-#
-#@author vlegrand@pasteur.fr
-import numpy as np
-import os
-import time
-
-base_chk_fname="chk_"
-chk_fname_sep="_"
-
-
-## Utility classes for testing the checkpoint implementation
-# class checkpoint_visitor:
-#     def __str__(self):
-#         return self.__class__.__name__
-#
-# class checkpoint_visitor_11150_Cos5(checkpoint_visitor):
-#     def visit(self,chk_res):
-#         if chk_res.host_len!=0 or chk_res.gen!=25 or chk_res.reads_tested!=2:
-#             return False
-#         return True
-#
-# class checkpoint_visitor_38_Cos5(checkpoint_visitor):
-#     def visit(self,chk_res):
-#         if chk_res.host_len!=0 or chk_res.gen!=25 or chk_res.reads_tested!=2:
-#             return False
-#         return True
-
-
-
-
-
-
-def loadArr(arr_idx0,arr_val0,arr_idx1,arr_val1,arr2D):
-    for idx, val in zip(arr_idx0, arr_val0):
-        arr2D[0][idx] = val
-
-    for idx, val in zip(arr_idx1, arr_val1):
-        arr2D[1][idx] = val
-
-
-def loadRCRes(filename):
-    npzfile = np.load(filename)
-    gen_len=npzfile['gen_len']
-    gen_len=int(gen_len)
-    host_len=npzfile['host_len']
-    host_len=int(host_len)
-    termini_coverage_idx0 = npzfile['termini_coverage_idx0']
-    termini_coverage_val0=npzfile['termini_coverage_val0']
-    termini_coverage_idx1 = npzfile['termini_coverage_idx1']
-    termini_coverage_val1 = npzfile['termini_coverage_val1']
-
-    whole_coverage_idx0=npzfile['whole_coverage_idx0']
-    whole_coverage_val0 = npzfile['whole_coverage_val0']
-    whole_coverage_idx1 = npzfile['whole_coverage_idx1']
-    whole_coverage_val1 = npzfile['whole_coverage_val1']
-
-    paired_whole_coverage_idx0=npzfile['paired_whole_coverage_idx0']
-    paired_whole_coverage_val0 = npzfile['paired_whole_coverage_val0']
-    paired_whole_coverage_idx1 = npzfile['paired_whole_coverage_idx1']
-    paired_whole_coverage_val1 = npzfile['paired_whole_coverage_val1']
-
-    phage_hybrid_coverage_idx0=npzfile['phage_hybrid_coverage_idx0']
-    phage_hybrid_coverage_val0 = npzfile['phage_hybrid_coverage_val0']
-    phage_hybrid_coverage_idx1 = npzfile['phage_hybrid_coverage_idx1']
-    phage_hybrid_coverage_val1 = npzfile['phage_hybrid_coverage_val1']
-
-    host_hybrid_coverage_idx0 = npzfile['host_hybrid_coverage_idx0']
-    host_hybrid_coverage_val0 = npzfile['host_hybrid_coverage_val0']
-    host_hybrid_coverage_idx1 = npzfile['host_hybrid_coverage_idx1']
-    host_hybrid_coverage_val1 = npzfile['host_hybrid_coverage_val1']
-
-    host_whole_coverage_idx0 = npzfile['host_whole_coverage_idx0']
-    host_whole_coverage_val0 = npzfile['host_whole_coverage_val0']
-    host_whole_coverage_idx1 = npzfile['host_whole_coverage_idx1']
-    host_whole_coverage_val1 = npzfile['host_whole_coverage_val1']
-
-    list_hybrid=npzfile['list_hybrid']
-    insert=npzfile['insert']
-    insert=list(insert)
-    paired_mismatch=npzfile['paired_mismatch']
-    reads_tested=npzfile['reads_tested']
-
-    termini_coverage=np.array([gen_len*[0], gen_len*[0]])
-
-    whole_coverage        = np.array([gen_len*[0], gen_len*[0]])
-    paired_whole_coverage = np.array([gen_len*[0], gen_len*[0]])
-    phage_hybrid_coverage = np.array([gen_len*[0], gen_len*[0]])
-    host_hybrid_coverage  = np.array([host_len*[0], host_len*[0]])
-    host_whole_coverage   = np.array([host_len*[0], host_len*[0]])
-    loadArr(termini_coverage_idx0,termini_coverage_val0,termini_coverage_idx1,termini_coverage_val1,termini_coverage)
-    loadArr(whole_coverage_idx0,whole_coverage_val0,whole_coverage_idx1,whole_coverage_val1,whole_coverage)
-    loadArr(paired_whole_coverage_idx0,paired_whole_coverage_val0,paired_whole_coverage_idx1,paired_whole_coverage_val1,paired_whole_coverage)
-    loadArr(phage_hybrid_coverage_idx0,phage_hybrid_coverage_val0,phage_hybrid_coverage_idx1,phage_hybrid_coverage_val1,phage_hybrid_coverage)
-    loadArr(host_hybrid_coverage_idx0,host_hybrid_coverage_val0,host_hybrid_coverage_idx1,host_hybrid_coverage_val1,host_hybrid_coverage)
-    loadArr(host_whole_coverage_idx0,host_whole_coverage_val0,host_whole_coverage_idx1,host_whole_coverage_val1,host_whole_coverage)
-
-    res=RCRes(termini_coverage,whole_coverage,paired_whole_coverage,\
-              phage_hybrid_coverage, host_hybrid_coverage,\
-              host_whole_coverage,list_hybrid,insert,paired_mismatch,reads_tested)
-
-    return res
-
-##
-# Working structure for readsCoverage (encapsulating temporary results)
-class RCWorkingS:
-    def __init__(self,rc_res,cnt_line,read_match):
-        self.interm_res=rc_res
-        self.count_line=cnt_line
-        self.read_match=read_match
-
-class RCRes:
-    def __init__(self,termini_coverage,whole_coverage,paired_whole_coverage,\
-                 phage_hybrid_coverage, host_hybrid_coverage, \
-                 host_whole_coverage,list_hybrid,insert,paired_mismatch,reads_tested):
-
-        self.termini_coverage=termini_coverage
-        self.whole_coverage=whole_coverage
-        self.paired_whole_coverage=paired_whole_coverage
-        self.phage_hybrid_coverage=phage_hybrid_coverage
-        self.host_hybrid_coverage=host_hybrid_coverage
-        self.host_whole_coverage=host_whole_coverage
-
-        self.list_hybrid=list_hybrid
-        self.insert=insert
-        self.paired_mismatch=paired_mismatch
-        self.reads_tested=reads_tested
-
-        self.gen_len = len(self.termini_coverage[0])
-        self.host_len= len(self.host_hybrid_coverage[0])
-
-    # def accept(self,a_visitor):
-    #     self.vis=a_visitor
-    #
-    # def make_visit(self):
-    #     self.vis.visit()
-
-    def save(self,filename):
-        termini_coverage_idx0 = np.flatnonzero(self.termini_coverage[0])
-        termini_coverage_val0 = self.termini_coverage[0][termini_coverage_idx0]
-        termini_coverage_idx1 = np.flatnonzero(self.termini_coverage[1])
-        termini_coverage_val1 = self.termini_coverage[1][termini_coverage_idx1]
-
-        whole_coverage_idx0 = np.flatnonzero(self.whole_coverage[0])
-        whole_coverage_val0 = self.whole_coverage[0][whole_coverage_idx0]
-        whole_coverage_idx1 = np.flatnonzero(self.whole_coverage[1])
-        whole_coverage_val1 = self.whole_coverage[1][whole_coverage_idx1]
-
-        paired_whole_coverage_idx0 = np.flatnonzero(self.paired_whole_coverage[0])
-        paired_whole_coverage_val0 = self.paired_whole_coverage[0][paired_whole_coverage_idx0]
-        paired_whole_coverage_idx1 = np.flatnonzero(self.paired_whole_coverage[1])
-        paired_whole_coverage_val1 = self.paired_whole_coverage[1][paired_whole_coverage_idx1]
-
-        phage_hybrid_coverage_idx0 = np.flatnonzero(self.phage_hybrid_coverage[0])
-        phage_hybrid_coverage_val0 = self.phage_hybrid_coverage[0][phage_hybrid_coverage_idx0]
-        phage_hybrid_coverage_idx1 = np.flatnonzero(self.phage_hybrid_coverage[1])
-        phage_hybrid_coverage_val1 = self.phage_hybrid_coverage[1][phage_hybrid_coverage_idx1]
-
-        host_hybrid_coverage_idx0 = np.flatnonzero(self.host_hybrid_coverage[0])
-        host_hybrid_coverage_val0 = self.host_hybrid_coverage[0][host_hybrid_coverage_idx0]
-        host_hybrid_coverage_idx1 = np.flatnonzero(self.host_hybrid_coverage[1])
-        host_hybrid_coverage_val1 = self.host_hybrid_coverage[1][host_hybrid_coverage_idx1]
-
-        host_whole_coverage_idx0 = np.flatnonzero(self.host_whole_coverage[0])
-        host_whole_coverage_val0 = self.host_whole_coverage[0][host_whole_coverage_idx0]
-        host_whole_coverage_idx1 = np.flatnonzero(self.host_whole_coverage[1])
-        host_whole_coverage_val1 = self.host_whole_coverage[1][host_whole_coverage_idx1]
-
-        np.savez(filename,gen_len=np.array(self.gen_len),host_len=np.array(self.host_len),\
-                 termini_coverage_idx0=termini_coverage_idx0, termini_coverage_val0=termini_coverage_val0,\
-                 termini_coverage_idx1=termini_coverage_idx1, termini_coverage_val1=termini_coverage_val1,\
-                 whole_coverage_idx0=whole_coverage_idx0,whole_coverage_val0=whole_coverage_val0,\
-                 whole_coverage_idx1=whole_coverage_idx1,whole_coverage_val1=whole_coverage_val1,\
-                 paired_whole_coverage_idx0=paired_whole_coverage_idx0,paired_whole_coverage_val0=paired_whole_coverage_val0,\
-                 paired_whole_coverage_idx1=paired_whole_coverage_idx1,paired_whole_coverage_val1=paired_whole_coverage_val1, \
-                 phage_hybrid_coverage_idx0=phage_hybrid_coverage_idx0,phage_hybrid_coverage_val0=phage_hybrid_coverage_val0, \
-                 phage_hybrid_coverage_idx1=phage_hybrid_coverage_idx1,phage_hybrid_coverage_val1=phage_hybrid_coverage_val1, \
-                 host_hybrid_coverage_idx0=host_hybrid_coverage_idx0,host_hybrid_coverage_val0=host_hybrid_coverage_val0, \
-                 host_hybrid_coverage_idx1=host_hybrid_coverage_idx1,host_hybrid_coverage_val1=host_hybrid_coverage_val1, \
-                 host_whole_coverage_idx0=host_whole_coverage_idx0,host_whole_coverage_val0=host_whole_coverage_val0, \
-                 host_whole_coverage_idx1=host_whole_coverage_idx1,host_whole_coverage_val1=host_whole_coverage_val1, \
-                 list_hybrid=self.list_hybrid,insert=self.insert,paired_mismatch=np.array(self.paired_mismatch),\
-                 reads_tested=self.reads_tested)
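-    # save() stores every coverage track in sparse form: np.flatnonzero gives the
-    # indices of the nonzero entries, and the values at those indices are stored
-    # alongside them. A minimal sketch of the inverse operation, assuming the
-    # companion loadRCRes/loadArr helpers rebuild a dense array like this:
-    #
-    #   npz = np.load(filename + ".npz")
-    #   dense = np.zeros(int(npz["gen_len"]), dtype=npz["termini_coverage_val0"].dtype)
-    #   dense[npz["termini_coverage_idx0"]] = npz["termini_coverage_val0"]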
-
-
-class RCCheckpoint:
-    def __init__(self,count_line,core_id,idx_seq,termini_coverage,whole_coverage,paired_whole_coverage,\
-                 phage_hybrid_coverage, host_hybrid_coverage, \
-                 host_whole_coverage,list_hybrid,insert,paired_mismatch,reads_tested,read_match):
-        self.count_line=count_line
-        self.core_id=core_id
-        self.idx_seq=idx_seq
-        self.read_match=read_match
-        self.res=RCRes(termini_coverage,whole_coverage,paired_whole_coverage,\
-                 phage_hybrid_coverage, host_hybrid_coverage, \
-                 host_whole_coverage,list_hybrid,insert,paired_mismatch,reads_tested)
-
-
-    def save(self,dir_chk,core_id,idx_refseq):
-        filename=base_chk_fname+str(self.core_id)+chk_fname_sep+str(self.idx_seq)+chk_fname_sep+\
-                 str(self.count_line)+chk_fname_sep+str(self.read_match)
-        full_fname = os.path.join(dir_chk, filename)
-        self.res.save(full_fname)
-        # remove the previous checkpoint file for this (core, sequence) pair
-        list_f=os.listdir(dir_chk)
-        sub_s=base_chk_fname+ str(core_id) + chk_fname_sep + str(idx_refseq) + chk_fname_sep
-        for f in list_f:
-            if f!=filename+".npz" and sub_s in f:
-                os.remove(os.path.join(dir_chk,f))
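-    # Checkpoint file names follow the pattern
-    #   <base_chk_fname><core_id><sep><idx_seq><sep><count_line><sep><read_match>.npz
-    # Assuming base_chk_fname is "chk_" and chk_fname_sep is "_" (both constants are
-    # defined elsewhere in this module), the test file chk_0_2_10_0.npz decodes as:
-    # core 0, sequence 2, resume after input line 10, 0 reads matched so far.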
-
-
-class RCCheckpoint_handler:
-    def __init__(self,chk_freq,dir_chk,test_mode=False):
-        self.chk_freq=chk_freq
-        self.test_mode=test_mode
-        self.start_t=time.perf_counter_ns() # reference time used by check() to decide when to checkpoint
-        self.dir_chk=dir_chk
-        # if self.test_mode == True:
-        #     self.v38_C5 = checkpoint_visitor_38_Cos5()
-        #     self.v11150_C5 = checkpoint_visitor_11150_Cos5()
-        if self.test_mode==True:
-            if os.path.exists(dir_chk):
-                if not (os.path.isdir(dir_chk)):
-                    raise RuntimeError("dir_chk must point to a directory")
-            else:
-                os.mkdir(dir_chk)
-        elif self.chk_freq!=0:
-            if os.path.exists(dir_chk):
-                if not (os.path.isdir(dir_chk)):
-                    raise RuntimeError("dir_chk must point to a directory")
-            else:
-                raise RuntimeError("dir_chk must point to an existing directory")
-
-    def getIdxSeq(self,core_id):
-        idx_seq=0
-        if self.chk_freq!=0 or self.test_mode==True:
-            list_f = os.listdir(self.dir_chk)
-            subfname = base_chk_fname+ str(core_id) + chk_fname_sep
-            chk_f = ""
-            for fname in list_f:
-                if subfname in fname:
-                    chk_f = fname
-                    break
-            if chk_f != "":
-                l=chk_f.split(chk_fname_sep)
-                idx_seq=int(l[2])
-        return idx_seq
-
-
-    def load(self,core_id,idx_refseq):
-        if self.chk_freq!=0 or self.test_mode==True:
-            list_f = os.listdir(self.dir_chk)
-            subfname = base_chk_fname+ str(core_id) + chk_fname_sep + str(idx_refseq) + chk_fname_sep
-            chk_f = ""
-            for fname in list_f:
-                if subfname in fname:
-                    chk_f = fname
-                    break
-            if chk_f != "":
-                interm_res=loadRCRes(os.path.join(self.dir_chk,chk_f))
-                # if self.test_mode==True:
-                #     interm_res.accept(self.v38_C5)
-                l=chk_f.split(chk_fname_sep)
-                cnt_line=int(l[-2])
-                tmp=l[-1] # get rid of .npz extension
-                l2=tmp.split(".")
-                read_match=int(l2[0])
-                partial_res=RCWorkingS(interm_res,cnt_line,read_match)
-                # if self.test_mode:
-                #     partial_res.accept(self.v38_C5)
-                #     partial_res.make_visit()
-                return partial_res
-            else:  # no checkpoint found for this sequence, start from beginning
-                return None
-        else:
-            return None
-
-
-    def check(self,count_line,core_id,idx_seq,termini_coverage,whole_coverage,paired_whole_coverage,\
-                 phage_hybrid_coverage, host_hybrid_coverage, \
-                 host_whole_coverage,list_hybrid,insert,paired_mismatch,reads_tested,read_match):
-        cur_t = time.perf_counter_ns()
-        elapsed_t = (cur_t - self.start_t) * 1e-9 # convert elapsed time from nanoseconds to seconds
-        if (self.test_mode==True or (self.chk_freq!=0 and elapsed_t >= self.chk_freq)):  # time to create a checkpoint.
-            chkp=RCCheckpoint(count_line,core_id,idx_seq,termini_coverage,whole_coverage,paired_whole_coverage,\
-                 phage_hybrid_coverage, host_hybrid_coverage, \
-                 host_whole_coverage,list_hybrid,insert,paired_mismatch,reads_tested,read_match)
-            chkp.save(self.dir_chk,core_id,idx_seq)
-            self.start_t = cur_t # restart the clock for the next checkpoint interval
-
-
-    def end(self,core_id):
-        if (self.test_mode==False and self.chk_freq!=0) :
-            # remove this core's checkpoint files now that processing is complete
-            list_f = os.listdir(self.dir_chk)
-            sub_s=base_chk_fname+str(core_id)+chk_fname_sep
-            for f in list_f:
-                if sub_s in f:
-                    os.remove(os.path.join(self.dir_chk, f))
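-    # Minimal usage sketch (illustrative only; the real call sites live in the
-    # mapping workers, and the check() argument list is abbreviated here):
-    #
-    #   handler = RCCheckpoint_handler(chk_freq=300, dir_chk="/tmp/chk")
-    #   idx_seq = handler.getIdxSeq(core_id)      # first sequence left to process
-    #   partial = handler.load(core_id, idx_seq)  # RCWorkingS, or None to start fresh
-    #   ...                                       # map reads, calling handler.check(...) regularly
-    #   handler.end(core_id)                      # drop this core's checkpoint files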
-
-
-
-
-
-
-
-
-
diff --git a/phageterm/seq_processing.py b/phageterm/seq_processing.py
deleted file mode 100755
index f3c66b2e474be2c34fbf5c09b8c264c65d93bc13..0000000000000000000000000000000000000000
--- a/phageterm/seq_processing.py
+++ /dev/null
@@ -1,95 +0,0 @@
-##@file seq_processing.py
-#
-# This file contains functions that are used when running phageterm on multiple machines of a compute cluster.
-# @param DR_path directory where the DR content is written.
-from __future__ import print_function
-
-from time import gmtime, strftime
-import os
-import numpy as np
-from _modules.utilities import checkReportTitle
-from _modules.readsCoverage_res import loadRCRes
-from _modules.common_readsCoverage_processing import processCovValuesForSeq
-#from SeqStats import SeqStats
-def sum_readsCoverage_for_seq(dir_cov_res,idx_refseq,nb_pieces,inDArgs,fParms,inRawDArgs,dir_seq_res,DR_path):
-    if os.path.exists(DR_path):
-        if not (os.path.isdir(DR_path)):
-            raise RuntimeError("DR_path must point to a directory")
-    else:
-        os.mkdir(DR_path)
-    DR = {"Headful (pac)": {}, "COS (5')": {}, "COS (3')": {}, "COS": {}, "DTR (short)": {}, "DTR (long)": {},
-          "Mu-like": {}, "UNKNOWN": {}, "NEW": {}}
-    print("going to load ",nb_pieces," files for sequence ",idx_refseq)
-    print(strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
-    for i in range(0,nb_pieces):
-        fic_name = os.path.join(dir_cov_res, "coverage" + str(idx_refseq) + "_" + str(i)+".npz")
-        print("loading file: ",fic_name)
-        print(strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
-        partial_res=loadRCRes(fic_name)
-        #npzfile=np.load(fic_name)
-        if i == 0:
-            termini_coverage = partial_res.termini_coverage
-            whole_coverage = partial_res.whole_coverage
-            paired_whole_coverage = partial_res.paired_whole_coverage
-            phage_hybrid_coverage = partial_res.phage_hybrid_coverage
-            host_hybrid_coverage = partial_res.host_hybrid_coverage
-            host_whole_coverage = partial_res.host_whole_coverage
-            list_hybrid = partial_res.list_hybrid
-            insert = partial_res.insert
-            paired_mismatch = partial_res.paired_mismatch
-            reads_tested = partial_res.reads_tested
-        else:
-            termini_coverage += partial_res.termini_coverage
-            whole_coverage += partial_res.whole_coverage
-            paired_whole_coverage += partial_res.paired_whole_coverage
-            phage_hybrid_coverage += partial_res.phage_hybrid_coverage
-            host_hybrid_coverage += partial_res.host_hybrid_coverage
-            host_whole_coverage += partial_res.host_whole_coverage
-            list_hybrid += partial_res.list_hybrid
-            insert += partial_res.insert
-            paired_mismatch += partial_res.paired_mismatch
-            reads_tested += partial_res.reads_tested
-
-    # fic_name = os.path.join(dir_seq_res, "coverage" + str(idx_refseq))
-    # np.savez(fic_name, termini_coverage=termini_coverage, whole_coverage=whole_coverage,
-    #          paired_whole_coverage=paired_whole_coverage, \
-    #          phage_hybrid_coverage=phage_hybrid_coverage, host_hybrid_coverage=host_hybrid_coverage, \
-    #          host_whole_coverage=host_whole_coverage, list_hybrid=list_hybrid, insert=insert,
-    #          paired_missmatch=np.array(paired_missmatch))
-    termini_coverage = termini_coverage.tolist()
-    whole_coverage = whole_coverage.tolist()
-    paired_whole_coverage = paired_whole_coverage.tolist()
-    phage_hybrid_coverage = phage_hybrid_coverage.tolist()
-    host_hybrid_coverage = host_hybrid_coverage.tolist()
-    host_whole_coverage = host_whole_coverage.tolist()
-    list_hybrid = list_hybrid.tolist()
-
-    if sum(termini_coverage[0]) + sum(termini_coverage[1]) == 0:
-        no_match_file="no_match"+str(idx_refseq)
-        with open(os.path.join(dir_seq_res,no_match_file),"w") as f:
-            f.write(checkReportTitle(inDArgs.refseq_name[idx_refseq]))
-
-    print("finished sum, calling processCovValuesForSeq")
-    print(strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
-    # TODO: passing and returning so many values hurts readability and maintainability. Group related values into structures.
-    refseq = inDArgs.refseq_liste[idx_refseq]
-    S_stats=processCovValuesForSeq(refseq, inDArgs.hostseq, inDArgs.refseq_name, inDArgs.refseq_liste, fParms.seed,
-                            inRawDArgs.analysis_name, inRawDArgs.tot_reads, \
-                            idx_refseq, fParms.test_run, inRawDArgs.paired, fParms.edge, inRawDArgs.host,
-                            fParms.test, fParms.surrounding, \
-                            fParms.limit_preferred, fParms.limit_fixed, fParms.Mu_threshold, termini_coverage,
-                            whole_coverage, \
-                            paired_whole_coverage, phage_hybrid_coverage, host_hybrid_coverage,
-                            host_whole_coverage, insert, list_hybrid, reads_tested, DR,DR_path)
-    #fic_name = os.path.join(dir_seq_res, "seq_stats" + str(idx_refseq))
-    # S_stats.toFile(fic_name): S_stats content is only used when there is a single sequence.
-    # That case is irrelevant here, since sum_readsCoverage_for_seq is meant for viromes, so S_stats is simply dropped.
-    # Only writing the DR content to file is useful when a virome is processed over several machines on a cluster.
-    print("exit sum_readsCoverage_for_seq")
-    print(strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
-
-
-
-
-
diff --git a/phageterm/utilities.py b/phageterm/utilities.py
deleted file mode 100644
index 40ddd3b602aba317c700ad57eb6fd5729cafb65f..0000000000000000000000000000000000000000
--- a/phageterm/utilities.py
+++ /dev/null
@@ -1,106 +0,0 @@
-## @file utilities.py
-#
-# Utility methods for phageterm, used in both the CPU and GPU versions.
-#from string import maketrans
-import re
-import random
-import sys
-
-import numpy as np
-import datetime
-
-if sys.version_info < (3,):
-    import string
-    TRANSTAB = string.maketrans("ACGTN", "TGCAN")
-else:
-    TRANSTAB = str.maketrans("ACGTN", "TGCAN")
-
-def checkReportTitle(report_title):
-    """Normalise report title (take out any special char)"""
-    default_title="Analysis_"
-    right_now=datetime.datetime.now()
-    default_title+=str(right_now.month)
-    default_title+=str(right_now.day)
-    default_title+="_"
-    default_title+=str(right_now.hour)
-    default_title+=str(right_now.minute)
-    titleNorm = ""
-    charok = list(range(48,58)) + list(range(65,91)) + list(range(97,123)) + [45,95]
-    for char in report_title:
-        if ord(char) in charok:
-            titleNorm += char
-    if len(titleNorm) > 1:
-        return titleNorm[:20]
-    else:
-        return default_title
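-# e.g. checkReportTitle("My phage #1!") returns "Myphage1": only alphanumerics,
-# '-' and '_' are kept, and the result is truncated to 20 characters. Titles that
-# normalise to one character or less fall back to a timestamped default such as
-# "Analysis_1115_1430".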
-
-### SEQUENCE manipulation function
-def changeCase(seq):
-    """Change lower case to UPPER CASE for a sequence string."""
-    return seq.upper()
-
-
-def reverseComplement(seq, transtab=str.maketrans('ATGCN', 'TACGN')):
-    """Reverse Complement a sequence."""
-    return changeCase(seq).translate(transtab)[::-1]
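-# e.g. reverseComplement("acgtn") returns "NACGT": the sequence is upper-cased,
-# complemented (A<->T, C<->G, N unchanged), then reversed.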
-
-def longest_common_substring(read, refseq):
-    """Longest common substring between two strings."""
-    m = [[0] * (1 + len(refseq)) for _ in range(1 + len(read))]
-    longest, x_longest = 0, 0
-    for x in range(1, 1 + len(read)):
-        for y in range(1, 1 + len(refseq)):
-            if read[x - 1] == refseq[y - 1]:
-                m[x][y] = m[x - 1][y - 1] + 1
-                if m[x][y] > longest:
-                    longest = m[x][y]
-                    x_longest = x
-            else:
-                m[x][y] = 0
-    return read[x_longest - longest: x_longest]
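-# e.g. longest_common_substring("GATTACA", "TTAC") returns "TTAC": m[x][y] holds
-# the length of the common suffix of read[:x] and refseq[:y], and the maximum over
-# the whole table locates the longest substring shared by the two strings.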
-
-def hybridCoverage(read, sequence, hybrid_coverage, start, end):
-    """Return hybrid coverage."""
-    aligned_part_only = longest_common_substring(read, sequence[start:end])
-    for i in range(start, min(len(sequence),start+len(aligned_part_only))):
-        hybrid_coverage[i]+=1
-    return hybrid_coverage
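-# e.g. with read="TTAC", sequence="GATTACA", start=2, end=6: the longest common
-# substring of "TTAC" and sequence[2:6] ("TTAC") has length 4, so positions 2..5
-# of hybrid_coverage are each incremented by one.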
-
-## Determines if readPart maps against the sequence.
-#
-# @param readPart A part of a read (usually the seed characters)
-# @param sequence The sequence to map against (a contig)
-# It chooses a mapping position at random amongst all the mappings found.
-# It returns 2 numbers: the start and stop positions of the chosen mapping location.
-def applyCoverage(readPart, sequence):
-    """Return a random match of a read onto the sequence. """
-    position = []
-    for pos in re.finditer(readPart,sequence):
-        position.append(pos)
-    if len(position) > 0:
-        match = random.choice(position)
-        return match.start(), match.end()
-    else:
-        return -1, -1
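-# e.g. applyCoverage("ACG", "TACGTACG") finds matches at offsets 1 and 5 and
-# returns either (1, 4) or (5, 8) at random; (-1, -1) means no match was found.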
-
-def correctEdge(coverage, edge):
-    """Correction of the Edge coverage. """
-    correctCov = np.array([len(coverage[0])*[0], len(coverage[0])*[0]])
-    End = len(coverage[0])
-    covSta = range(edge)
-    covEnd = range(End-edge,End)
-    for i in range(len(coverage)):
-        for j in range(len(coverage[i])):
-            correctCov[i][j] = coverage[i][j]
-        for k in covSta:
-            correctCov[i][k+edge] += coverage[i][k+End-edge]
-        for l in covEnd:
-            correctCov[i][l-edge] += coverage[i][l-End+edge]
-    return correctCov
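-# e.g. with edge=1, a coverage row [1, 2, 3, 4, 5, 6] becomes [1, 8, 3, 4, 6, 6]:
-# the count on the last position is folded onto index 1 (2+6=8) and the count on
-# the first position onto index 4 (5+1=6).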
-
-# utility class for storing the results of the decisionProcess function
-class DecisionProcessOutput:
-    def __init__(self, Redundant, Permuted, P_class, P_type, P_seqcoh, P_concat,
-                 P_orient, P_left, P_right, Mu_like):
-        self.Redundant, self.Permuted, self.Mu_like = Redundant, Permuted, Mu_like
-        self.P_class, self.P_type, self.P_seqcoh, self.P_concat = P_class, P_type, P_seqcoh, P_concat
-        self.P_orient, self.P_left, self.P_right = P_orient, P_left, P_right
-
diff --git a/phageterm/IData_handling.py b/phagetermvirome/IData_handling.py
similarity index 100%
rename from phageterm/IData_handling.py
rename to phagetermvirome/IData_handling.py
diff --git a/phageterm/PhageTerm.py b/phagetermvirome/PhageTerm.py
similarity index 100%
rename from phageterm/PhageTerm.py
rename to phagetermvirome/PhageTerm.py
diff --git a/_modules/SeqStats.py b/phagetermvirome/SeqStats.py
similarity index 100%
rename from _modules/SeqStats.py
rename to phagetermvirome/SeqStats.py
diff --git a/__init__.py b/phagetermvirome/__init__.py
similarity index 100%
rename from __init__.py
rename to phagetermvirome/__init__.py
diff --git a/phageterm/common_readsCoverage_processing.py b/phagetermvirome/common_readsCoverage_processing.py
similarity index 100%
rename from phageterm/common_readsCoverage_processing.py
rename to phagetermvirome/common_readsCoverage_processing.py
diff --git a/_modules/debug_utils.py b/phagetermvirome/debug_utils.py
similarity index 100%
rename from _modules/debug_utils.py
rename to phagetermvirome/debug_utils.py
diff --git a/phageterm/functions_PhageTerm.py b/phagetermvirome/functions_PhageTerm.py
similarity index 100%
rename from phageterm/functions_PhageTerm.py
rename to phagetermvirome/functions_PhageTerm.py
diff --git a/_modules/generate_report.py b/phagetermvirome/generate_report.py
similarity index 100%
rename from _modules/generate_report.py
rename to phagetermvirome/generate_report.py
diff --git a/phageterm/main_utils.py b/phagetermvirome/main_utils.py
similarity index 100%
rename from phageterm/main_utils.py
rename to phagetermvirome/main_utils.py
diff --git a/_modules/readsCoverage_res.py b/phagetermvirome/readsCoverage_res.py
similarity index 100%
rename from _modules/readsCoverage_res.py
rename to phagetermvirome/readsCoverage_res.py
diff --git a/_modules/seq_processing.py b/phagetermvirome/seq_processing.py
similarity index 100%
rename from _modules/seq_processing.py
rename to phagetermvirome/seq_processing.py
diff --git a/_modules/utilities.py b/phagetermvirome/utilities.py
similarity index 100%
rename from _modules/utilities.py
rename to phagetermvirome/utilities.py
diff --git a/pyproject.toml b/pyproject.toml
index e2f7caf60c8bb61ba3d9f7a72aee65c6ef1ae3fd..f9449697aa50ff3bb784da578f746546ac93ad56 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,12 +1,18 @@
 [tool.poetry]
-name = "phageterm"
-version = "4.1.1"
+name = "phagetermvirome"
+version = "4.2b"
 description = "Using sequencing bias to identify phages terminii and phage genome packaging."
 authors = ["Marc Monot <marc.monot@pasteur.fr>",
            "Julian Garneau <julian.garneau@unil.ch>",
            "Veronique Legrand <veronique.legrand@pasteur.fr>"]
 license = "AGPLv3+"
 readme = "README.md"
+packages = [{ include = "phagetermvirome/*.py" }]
+include = [ {path = "test-data" } ]
+# include = [ {path = "data-virome" } ] # Not included: these files would make the archive too big.
+
+[tool.poetry.scripts]
+phageterm = 'phagetermvirome.PhageTerm:main'
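+# With this entry point, installing the package (e.g. `pip install .` or
+# `poetry install`) makes a `phageterm` command available that runs PhageTerm.main().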
 
 [tool.poetry.dependencies]
 python = [">=3.9,<3.13"]
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index a06b7a6debe656435c7be37a8cc056bda5b941ce..0000000000000000000000000000000000000000
--- a/requirements.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-backports
-backports.functools_lru_cache
-backports_abc
-cycler
-libwebp-base
-lz4-c
-matplotlib
-numpy
-openssl
-pandas
-patsy
-pillow
-pip
-pyparsing
-python3.6
-python-dateutil
-python_abi
-pytz
-readline
-reportlab
-scikit-learn
-scipy
-setuptools
-statsmodels
-tk
\ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 416be7cb8790297307a0738e8606665d8e0a05f2..0000000000000000000000000000000000000000
--- a/setup.cfg
+++ /dev/null
@@ -1,100 +0,0 @@
-[metadata]
-name = phageTermVirome
-version = 2.1
-author = Marc Monot
-author_email = marc.monot@pasteur.fr
-description = Using sequencing bias to identify phage termini and phage genome packaging.
-long_description = file: README.txt
-long_description_content_type = text/plain
-url = https://gitlab.pasteur.fr/vlegrand/ptv
-project_urls =
-    Bug Tracker = https://gitlab.pasteur.fr/vlegrand/ptv/-/issues
-classifiers =
-    Programming Language :: Python :: 3
-    License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)
-    Operating System :: OS Independent
-    Development Status :: 5 - Production/Stable
-platforms=any
-
-[options]
-package_dir =
-    ptv = .
-    = _modules
-python_requires = >=3.6
-install_requires =
-    backports
-    backports.functools_lru_cache
-    backports_abc
-    cycler
-    libwebp-base
-    lz4-c
-    matplotlib
-    numpy
-    openssl
-    pandas
-    patsy
-    pillow
-    pip
-    pyparsing
-    python-dateutil
-    python_abi
-    pytz
-    readline
-    reportlab
-    scikit-learn
-    scipy
-    setuptools
-    statsmodels
-    tk
-
-
-[options.entry_points]
-console_scripts =
-    PhageTermVirome = ptv.PhageTerm:main
-
-[options.packages.find]
-#where = .
-exclude =
-          .*non-regression-tests.*
-          .*non-regression-tests
-
-[options.data_files]
-test_data = test-data/COS-3.500.fastq
-              test-data/COS-3.fasta
-              test-data/COS-3.fastq
-              test-data/COS-5.fasta
-              test-data/COS-5.fastq
-              test-data/DTR-long.fasta
-              test-data/DTR-long.fastq
-              test-data/DTR-short.fasta
-              test-data/DTR-short.fastq
-              test-data/Headful.fasta
-              test-data/Headful.fastq
-              test-data/Mu-like.fasta
-              test-data/Mu-like_R1.fastq
-              test-data/Mu-like_R2.fastq
-              test-data/Virome.fasta
-              test-data/Virome.fastq
-              test-data/chk_0_2_10_0.npz
-#    data-virome=data-virome/Contigs_30min.fasta
-#  data-virome/SRR4295172_2_div6.fastq
-# data-virome/SRR4295172_1_div6.fastq
-#      unit-tests-data=unit-tests/data/G-janv_S2_R1_001.fastq.500
-#                     unit-tests/data/G-janv_S2_R2_001.fastq.500
-#                     unit-tests/data/chk_0_0_38_863.npz
-#                     unit-tests/data/coverage0_0.npz
-#                     unit-tests/data/seq1_2_3.fasta
-#     non-regression-tests-data=non-regression-tests/data/HK97_assembly.fasta
-#                               non-regression-tests/data/Lambda_assembly.fasta
-#                               non-regression-tests/data/P1_assembly.fasta
-#                               non-regression-tests/data/R1_1M_READS_EACH_PHAGE(1).fastq.4
-#                               non-regression-tests/data/R1_1M_READS_EACH_PHAGE.fastq.20
-#                               non-regression-tests/data/R2_1M_READS_EACH_PHAGE(1).fastq.4
-#                               non-regression-tests/data/R2_1M_READS_EACH_PHAGE.fastq.20
-#                               non-regression-tests/data/Staph1N_assembly.fasta
-#                               non-regression-tests/data/T4_assembly.fasta
-#                               non-regression-tests/data/T7_assembly.fasta
-#                               non-regression-tests/data/virome_6seq.fa
diff --git a/setup.py b/setup.py
deleted file mode 100644
index ea7957dca99fdb80407f09f90bcb5b7623134939..0000000000000000000000000000000000000000
--- a/setup.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""A setuptools based setup module.
-See:
-https://packaging.python.org/guides/distributing-packages-using-setuptools/
-https://github.com/pypa/sampleproject
-"""
-
-# Always prefer setuptools over distutils
-from setuptools import setup, find_packages
-#import pathlib
-
-#here = pathlib.Path(__file__).parent.resolve()
-
-# Get the long description from the README file
-# long_description = (here / "README.md").read_text(encoding="utf-8")
-#long_description = (here / "README.txt").read_text(encoding="utf-8")
-
-setup()
\ No newline at end of file