diff --git a/server/api/tasks.py b/server/api/tasks.py
index 5d0ad4ebfc25e653a5c98ba6ce17d3507ee48ddc..7e52416e4787930bed5450be09a0d47bb1d30d40 100644
--- a/server/api/tasks.py
+++ b/server/api/tasks.py
@@ -32,8 +32,10 @@ def run_analyis(
     # current_history = gi.histories.create_history()
     # history_id = current_history["id"]
     chained_tasks = chain(
-        create_history.si(f"{settings.HOSTLABEL} - {project_name} - {analysis_name}", analysis_id),
-        upload_file_to_galaxy.s(experiment_file_path, "excel.xls",),
+        create_history.si(
+            f"{settings.HOSTLABEL} - {project_name} - {analysis_name}", analysis_id
+        ),
+        upload_file_to_galaxy.s(experiment_file_path, "excel.xls"),
         genome_scan_wf.s(tools_params, wf_id),
         load_lod_score.s(),
         load_significance_threshold.s(),
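# A minimal sketch of the chain semantics relied on above (task names here are
# illustrative, not part of this module): in a Celery chain, each task's
# return value is passed as the first argument of the next task; `.si()`
# builds an immutable signature that ignores the parent result, while `.s()`
# builds a partial one that receives it.
from celery import Celery, chain

app = Celery("sketch")

@app.task
def produce_ids(name, analysis_id):
    # plays the role of create_history: returns the tuple the next task unpacks
    return (analysis_id, "history-1")

@app.task
def consume_ids(ids, file_path, file_type):
    # plays the role of upload_file_to_galaxy: `ids` is produce_ids' return value
    analysis_id, history_id = ids
    return analysis_id, history_id, "dataset-1"

pipeline = chain(produce_ids.si("run-1", 42), consume_ids.s("/tmp/f.xls", "xls"))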
@@ -78,7 +80,6 @@ def upload_file_to_galaxy(ids, file_path, file_type):
     analysis_id, history_id = ids
     # gi = GalaxyInstance(url=url, key=key, verify=False)
     upload_response = gi.tools.upload_file(file_path, history_id, file_type=file_type)
-
     upload_data_id = upload_response["outputs"][0]["id"]
     upload_job = upload_response["jobs"][0]
     upload_job_id = upload_job["id"]
@@ -101,7 +102,7 @@ def upload_file_to_galaxy(ids, file_path, file_type):
             f"error : {data['misc_info']}"
         )
     shutil.rmtree(Path(file_path).parent)
-    return (analysis_id, history_id, upload_data_id)
+    return analysis_id, history_id, upload_data_id
 
 
 @celery_app.task()
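# A hedged sketch of the upload handling above, assuming a reachable Galaxy
# server; the URL, key, and ids are placeholders. `upload_file` returns a dict
# holding the created datasets under "outputs" and the upload job under
# "jobs", whose state can then be polled.
from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance(url="https://galaxy.example.org", key="<api-key>")
upload_response = gi.tools.upload_file("/tmp/experiment.xls", "<history-id>", file_type="xls")
upload_data_id = upload_response["outputs"][0]["id"]
upload_job_id = upload_response["jobs"][0]["id"]
state = gi.jobs.show_job(upload_job_id)["state"]  # e.g. "queued", "running", "ok", "error"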
@@ -132,7 +133,7 @@ def genome_scan_wf(ids, tools_params, wf_id):
         #             f"Error during Galaxy workflow job - name : "
         #             f"id : {workflow_job_id}, "
         #         )
-        return analysis_id, history_id
+        return analysis_id, history_id, workflow_job_id
     except StopIteration:
         raise Exception(f"Do not find the workflow : {genome_scan_wf_metadata['name']}")
 
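# A sketch of driving the workflow step through bioblend, assuming `wf_id`
# names an accessible workflow; the input mapping is illustrative. The
# invocation id returned here is the kind of handle that makes forwarding
# `workflow_job_id` in the return tuple useful to downstream tasks.
from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance(url="https://galaxy.example.org", key="<api-key>")
invocation = gi.workflows.invoke_workflow(
    "<workflow-id>",
    inputs={"0": {"src": "hda", "id": "<uploaded-dataset-id>"}},
    history_id="<history-id>",
)
invocation_id = invocation["id"]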
@@ -174,7 +175,7 @@ def download_formatted_qtl2_data(history_id):
 
 @celery_app.task()
 def load_lod_score(ids):
-    analysis_id, history_id = ids
+    analysis_id, history_id, _ = ids
     dataset_pattern = "lod.csv"
     try:
         datasets = gi.histories.show_history(history_id, contents=True)
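# The load_* tasks all share this retrieval idiom; a hedged sketch, assuming
# `gi` is a connected GalaxyInstance and the history holds a dataset whose
# name matches the pattern. With no file_path argument, `download_dataset`
# returns the raw dataset content instead of writing it to disk.
from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance(url="https://galaxy.example.org", key="<api-key>")
datasets = gi.histories.show_history("<history-id>", contents=True)
matching = [ds for ds in datasets if "lod.csv" in ds["name"]]
for ds in matching:
    content = gi.datasets.download_dataset(ds["id"])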
@@ -200,13 +201,12 @@ def load_lod_score(ids):
             lod_entry = LodScore(analysis=analysis, lod_scores=lod_scores)
             lod_entry.save()
             # LodScore.objects.bulk_create([lod_scores])
-    return analysis_id, history_id
+    return analysis_id, history_id, lod_entry.id
 
 
 @celery_app.task()
 def load_refine_peaks(ids):
-    print(ids)
-    analysis_id, history_id = ids
+    analysis_id, history_id, _ = ids
     dataset_pattern = "refine-peaks.csv"
     try:
         # TODO: test with a fake history to exercise the exception path and
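# Caveat: `lod_entry` above is only assigned inside the dataset loop, so the
# new `return analysis_id, history_id, lod_entry.id` raises NameError when no
# dataset matched; the same applies to coef_db, haplotype_db, top_snps_db and
# snps_assoc_db below. A minimal defensive sketch (hypothetical helper):
def last_saved_id(entries):
    entry_id = None  # default when nothing was saved
    for entry in entries:
        entry.save()
        entry_id = entry.id
    return entry_id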
@@ -237,15 +237,15 @@ def load_refine_peaks(ids):
                 peak.save()
                 peak_ids.append(peak.id)
 
-    return analysis_id, history_id
+    return analysis_id, history_id, peak_ids
 
 
 @celery_app.task()
 def load_significance_threshold(ids):
-    analysis_id, history_id = ids
+    analysis_id, history_id, _ = ids
     # gi = GalaxyInstance(url=url, key=key, verify=False)
     dataset_pattern = "significance-threshold.csv"
-    ids = []
+    significance_threshold_ids = []
     try:
         # TODO: test with a fake history to exercise the exception path and
         # handle the failure as needed
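# The rename from `ids = []` to `significance_threshold_ids = []` above stops
# the local list from rebinding the `ids` parameter; a toy illustration of
# the shadowing the old code invited:
def shadowed(ids):
    analysis_id, history_id, _ = ids
    ids = []          # the incoming chain payload is now unreachable
    ids.append(analysis_id)
    return ids        # a local list, no longer the tuple the caller sent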
@@ -259,7 +259,6 @@ def load_significance_threshold(ids):
     except StopIteration:
         raise Exception(f"Do not find the dataset : {dataset_pattern}")
     for dataset in filtered_datasets:
-        print(dataset["name"])
         try:
             downloaded_ds = gi.datasets.download_dataset(dataset["id"])
         except HTTPError:
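# The removed debug `print` calls only reach the worker's stdout; a sketch of
# the idiomatic Celery replacement (the task shown is hypothetical, and
# `celery_app` is this module's application object):
from celery.utils.log import get_task_logger

logger = get_task_logger(__name__)

@celery_app.task()
def load_example(ids):
    logger.info("loading datasets for ids %s", ids)
    return ids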
@@ -274,14 +273,14 @@ def load_significance_threshold(ids):
             bulk = threshold_gen(signi_thres, analysis)
             for threshold_db in bulk:
                 threshold_db.save()
-                ids.append(threshold_db.id)
+                significance_threshold_ids.append(threshold_db.id)
             # LodScoreSignificanceThreshold.objects.bulk_create(bulk, batch_size=10000)
-    return analysis_id, history_id
+    return analysis_id, history_id, significance_threshold_ids
 
 
 @celery_app.task()
 def load_coefficient(ids):
-    analysis_id, history_id = ids
+    analysis_id, history_id, _ = ids
     dataset_pattern = "coef.csv"
     try:
         # TODO: test with a fake history to exercise the exception path and
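# The commented-out bulk_create hints at the intended optimisation; a sketch
# of the trade-off, assuming LodScoreSignificanceThreshold is a plain Django
# model. `bulk_create` issues batched INSERTs instead of one query per
# `save()`, but it skips `save()` overrides and pre/post-save signals, and
# primary keys on the returned objects are only populated on backends that
# support RETURNING (e.g. PostgreSQL).
rows = list(threshold_gen(signi_thres, analysis))
LodScoreSignificanceThreshold.objects.bulk_create(rows, batch_size=10000)
significance_threshold_ids = [row.id for row in rows]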
@@ -318,12 +317,12 @@ def load_coefficient(ids):
             coef_gen = coefficient_gen(file_list[1:])
             coef_db = Coefficient(peak=peak, coefficients=list(coef_gen))
             coef_db.save()
-    return analysis_id, history_id
+    return analysis_id, history_id, coef_db.id
 
 
 @celery_app.task()
 def load_haplotypes(ids):
-    analysis_id, history_id = ids
+    analysis_id, history_id, _ = ids
     dataset_pattern = "haplo.csv"
     try:
         datasets = gi.histories.show_history(history_id, contents=True)
@@ -356,12 +355,12 @@ def load_haplotypes(ids):
             haplo_gen = haplotype_gen(file_list[1:])
             haplotype_db = Haplotypes(peak=peak, haplotypes=list(haplo_gen))
             haplotype_db.save()
-    return analysis_id, history_id
+    return analysis_id, history_id, haplotype_db.id
 
 
 @celery_app.task()
 def load_top_snps(ids):
-    analysis_id, history_id = ids
+    analysis_id, history_id, _ = ids
     dataset_pattern = "top_snps.csv"
     try:
         datasets = gi.histories.show_history(history_id, contents=True)
@@ -393,12 +392,12 @@ def load_top_snps(ids):
             top_snps = list(top_snps_gen(file_list[1:]))
             top_snps_db = TopSnps(peak=peak, top_snps=top_snps)
             top_snps_db.save()
-    return analysis_id, history_id
+    return analysis_id, history_id, top_snps_db.id
 
 
 @celery_app.task()
 def load_snps_association(ids):
-    analysis_id, history_id = ids
+    analysis_id, history_id, _ = ids
     dataset_pattern = "snps_assoc.csv"
     try:
         datasets = gi.histories.show_history(history_id, contents=True)
@@ -432,7 +431,7 @@ def load_snps_association(ids):
             snps_assoc = list(snps_association_gen(file_list[1:]))
             snps_assoc_db = SnpsAssociation(peak=peak, snps_association=snps_assoc)
             snps_assoc_db.save()
-    return analysis_id, history_id
+    return analysis_id, history_id, snps_assoc_db.id
 
 
 def threshold_gen(thresholds, analysis):
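# The body of threshold_gen lies outside this diff; a hypothetical sketch of
# a generator in this style, yielding unsaved model instances for the caller
# to save one by one or hand to bulk_create (the field name is an assumption):
def threshold_gen_sketch(thresholds, analysis):
    for threshold in thresholds:
        yield LodScoreSignificanceThreshold(
            analysis=analysis,
            threshold=threshold,  # hypothetical field name
        )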