Commit e10a0af2 authored by Blaise Li

Reverting snakefiles to use plain wrappers_dir.

parent 182bef09
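
Every hunk below makes the same one-line change: the wrapper URL goes back from f"file://{wrappers_dir[0]}/..." to f"file://{wrappers_dir}/...", i.e. wrappers_dir is used as a plain string rather than indexed as a one-element list. A minimal sketch of a Snakefile header consistent with the reverted lines is shown here; the config key and placeholder paths are hypothetical, only the name wrappers_dir and the wrapper names come from the diff:

```python
# Sketch only, not the project's actual snakefile header.
# Assumption: wrappers_dir is resolved once as a plain string (not a list),
# so it can be interpolated directly into the wrapper URLs below.
from pathlib import Path

# Hypothetical config key and default; the real snakefiles may set this differently.
wrappers_dir = str(Path(config.get(
    "wrappers_dir", "/path/to/snakemake_wrappers")).expanduser())

rule map_on_genome:
    input:
        "results/{lib}_{rep}_trimmed.fastq.gz",  # placeholder input pattern
    output:
        "results/{lib}_{rep}.sam",               # placeholder output pattern
    threads: 12
    wrapper:
        # Local wrapper directory, as in the hunks below after the revert.
        f"file://{wrappers_dir}/map_on_genome"
```
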
@@ -427,7 +427,7 @@ rule map_on_genome:
        err = OPJ(log_dir, "{trimmer}", aligner, "map_{read_type}_on_genome", "{lib}_{rep}.err"),
    threads: 12
    wrapper:
-        f"file://{wrappers_dir[0]}/map_on_genome"
+        f"file://{wrappers_dir}/map_on_genome"

rule remap_on_genome:
@@ -455,7 +455,7 @@ rule remap_on_genome:
        err = OPJ(log_dir, "{trimmer}", aligner, "remap_{read_type}_unmapped_on_genome", "{lib}_{rep}.err"),
    threads: 12
    wrapper:
-        f"file://{wrappers_dir[0]}/map_on_genome"
+        f"file://{wrappers_dir}/map_on_genome"

rule sam2indexedbam:
@@ -474,7 +474,7 @@ rule sam2indexedbam:
    resources:
        mem_mb=4100
    wrapper:
-        f"file://{wrappers_dir[0]}/sam2indexedbam"
+        f"file://{wrappers_dir}/sam2indexedbam"

rule compute_mapping_stats:
...
@@ -170,7 +170,7 @@ rule map_on_genome:
    threads:
        4
    wrapper:
-        f"file://{wrappers_dir[0]}/map_on_genome"
+        f"file://{wrappers_dir}/map_on_genome"

rule sam2indexedbam:
@@ -189,7 +189,7 @@ rule sam2indexedbam:
    resources:
        mem_mb=4100
    wrapper:
-        f"file://{wrappers_dir[0]}/sam2indexedbam"
+        f"file://{wrappers_dir}/sam2indexedbam"

def biotype2annot(wildcards):
@@ -396,7 +396,7 @@ rule compute_RPK:
    # rpk = 1000 * counts_data.loc[common].div(feature_lengths.loc[common]["union_exon_len"], axis="index")
    # rpk.to_csv(output.rpk_file, sep="\t")
    wrapper:
-        f"file://{wrappers_dir[0]}/compute_RPK"
+        f"file://{wrappers_dir}/compute_RPK"

# Compute TPM using total number of mappers divided by genome length
@@ -418,7 +418,7 @@ rule compute_TPM:
    # tpm = 1000000 * rpk / rpk.sum()
    # tpm.to_csv(output.tpm_file, sep="\t")
    wrapper:
-        f"file://{wrappers_dir[0]}/compute_TPM"
+        f"file://{wrappers_dir}/compute_TPM"

# TODO: Is it better to compute the mean and then the fold of the means?
...
@@ -491,7 +491,7 @@ rule map_on_genome:
    # eval ${{cmd}} 1>> {log.log} 2>> {log.err}
    # """
    wrapper:
-        f"file://{wrappers_dir[0]}/map_on_genome"
+        f"file://{wrappers_dir}/map_on_genome"

rule sam2indexedbam:
@@ -510,7 +510,7 @@ rule sam2indexedbam:
    resources:
        mem_mb=4100
    wrapper:
-        f"file://{wrappers_dir[0]}/sam2indexedbam"
+        f"file://{wrappers_dir}/sam2indexedbam"

rule fuse_bams:
@@ -655,7 +655,7 @@ rule htseq_count_reads:
        log = OPJ(log_dir, "{trimmer}", "htseq_count_reads", "{lib}_{rep}_{biotype}_{orientation}.log"),
        err = OPJ(log_dir, "{trimmer}", "htseq_count_reads", "{lib}_{rep}_{biotype}_{orientation}.err")
    wrapper:
-        f"file://{wrappers_dir[0]}/htseq_count_reads"
+        f"file://{wrappers_dir}/htseq_count_reads"

def parse_htseq_counts(counts_filename):
@@ -964,7 +964,7 @@ rule compute_RPK:
    # rpk = 1000 * counts_data.loc[common].div(feature_lengths.loc[common]["union_exon_len"], axis="index")
    # rpk.to_csv(output.rpk_file, sep="\t")
    wrapper:
-        f"file://{wrappers_dir[0]}/compute_RPK"
+        f"file://{wrappers_dir}/compute_RPK"

rule compute_sum_million_RPK:
@@ -995,7 +995,7 @@ rule compute_TPM:
    # tpm = 1000000 * rpk / rpk.sum()
    # tpm.to_csv(output.tpm_file, sep="\t")
    wrapper:
-        f"file://{wrappers_dir[0]}/compute_TPM"
+        f"file://{wrappers_dir}/compute_TPM"

@wc_applied
...
@@ -566,7 +566,7 @@ rule map_on_genome:
    #shell:
    #    mapping_command(aligner)
    wrapper:
-        f"file://{wrappers_dir[0]}/map_on_genome"
+        f"file://{wrappers_dir}/map_on_genome"

rule extract_nomap_polyU:
@@ -619,7 +619,7 @@ rule remap_on_genome:
    #shell:
    #    mapping_command(aligner)
    wrapper:
-        f"file://{wrappers_dir[0]}/map_on_genome"
+        f"file://{wrappers_dir}/map_on_genome"

def source_sam(wildcards):
@@ -659,7 +659,7 @@ rule sam2indexedbam:
    threads:
        8
    wrapper:
-        f"file://{wrappers_dir[0]}/sam2indexedbam"
+        f"file://{wrappers_dir}/sam2indexedbam"

rule compute_mapping_stats:
@@ -838,7 +838,7 @@ def biotype2annot(wildcards):
#        log = OPJ(log_dir, "htseq_count_reads", "{lib}_{rep}_{biotype}_{orientation}.log"),
#        err = OPJ(log_dir, "htseq_count_reads", "{lib}_{rep}_{biotype}_{orientation}.err"),
#    wrapper:
-#        f"file://{wrappers_dir[0]}/htseq_count_reads"
+#        f"file://{wrappers_dir}/htseq_count_reads"

def source_sorted_bam(wildcards):
@@ -984,7 +984,7 @@ rule feature_count_reads:
#        err = OPJ(log_dir, "intersect_count_reads", "{lib}_{rep}_{biotype}_{orientation}.err"),
#    threads: 4  # to limit memory usage, actually
#    wrapper:
-#        f"file://{wrappers_dir[0]}/intersect_count_reads"
+#        f"file://{wrappers_dir}/intersect_count_reads"

rule summarize_counts:
@@ -1108,7 +1108,7 @@ rule compute_RPK:
    # rpk = 1000 * counts_data.loc[common].div(feature_lengths.loc[common]["union_exon_len"], axis="index")
    # rpk.to_csv(output.rpk_file, sep="\t")
    wrapper:
-        f"file://{wrappers_dir[0]}/compute_RPK"
+        f"file://{wrappers_dir}/compute_RPK"

rule compute_sum_million_RPK:
@@ -1143,7 +1143,7 @@ rule compute_TPM:
    # tpm = 1000000 * rpk / rpk.sum()
    # tpm.to_csv(output.tpm_file, sep="\t")
    wrapper:
-        f"file://{wrappers_dir[0]}/compute_TPM"
+        f"file://{wrappers_dir}/compute_TPM"

# Useful to compute translation efficiency in the Ribo-seq pipeline
...
@@ -752,7 +752,7 @@ rule map_on_genome:
    threads:
        4
    wrapper:
-        f"file://{wrappers_dir[0]}/map_on_genome"
+        f"file://{wrappers_dir}/map_on_genome"

def source_sam(wildcards):
@@ -786,7 +786,7 @@ rule sam2indexedbam:
    resources:
        mem_mb=4100
    wrapper:
-        f"file://{wrappers_dir[0]}/sam2indexedbam"
+        f"file://{wrappers_dir}/sam2indexedbam"

rule compute_mapping_stats:
...
@@ -1013,7 +1013,7 @@ rule map_on_genome:
    resources:
        mem_mb=700
    wrapper:
-        f"file://{wrappers_dir[0]}/map_on_genome"
+        f"file://{wrappers_dir}/map_on_genome"

rule extract_nomap_siRNAs:
@@ -1066,7 +1066,7 @@ rule remap_on_genome:
    resources:
        mem_mb=700
    wrapper:
-        f"file://{wrappers_dir[0]}/map_on_genome"
+        f"file://{wrappers_dir}/map_on_genome"

def source_sam(wildcards):
@@ -1101,7 +1101,7 @@ rule sam2indexedbam:
    resources:
        mem_mb=4100
    wrapper:
-        f"file://{wrappers_dir[0]}/sam2indexedbam"
+        f"file://{wrappers_dir}/sam2indexedbam"

rule compute_coverage:
...
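
Each file://{wrappers_dir}/<name> URL above points Snakemake at a local wrapper directory containing a wrapper.py script, following Snakemake's wrapper convention. As a reminder of how such a wrapper is executed, here is a hypothetical placeholder body; only the wrapper names come from the diff, and the aligner command below is not the project's actual wrapper code:

```python
# map_on_genome/wrapper.py -- hypothetical placeholder body.
# Snakemake resolves f"file://{wrappers_dir}/map_on_genome" to this file and
# runs it with an injected `snakemake` object carrying the rule's input,
# output, threads, params and log entries.
from snakemake.shell import shell

# Placeholder command; the real mapping command lives in the project's wrapper.
shell(
    "some_aligner --threads {snakemake.threads}"
    " -o {snakemake.output[0]} {snakemake.input[0]}"
    " 2> {snakemake.log.err}"
)
```
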