libhts.py 30.8 KB
Newer Older
Blaise Li's avatar
Blaise Li committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# Copyright (C) 2020 Blaise Li
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
15
from math import floor, ceil, sqrt, log
16
from functools import reduce
Blaise Li's avatar
Blaise Li committed
17
# from re import sub
Blaise Li's avatar
Blaise Li committed
18
import warnings
19
import numpy as np
20
import pandas as pd
21
22
# To compute bins in bigwig data
from scipy.stats import binned_statistic
Blaise Li's avatar
Blaise Li committed
23
24
# To compute correlation coefficient, and compute linear regression
from scipy.stats.stats import pearsonr, linregress
Blaise Li's avatar
Blaise Li committed
25
26
# To compute geometric mean
from scipy.stats.mstats import gmean
Blaise Li's avatar
Blaise Li committed
27
import matplotlib as mpl
28
import matplotlib.pyplot as plt
29
# TODO: set this at the "correct" place
Blaise Li's avatar
Blaise Li committed
30
# https://stackoverflow.com/a/42768093/1878788
31
32
33
34
35
36
37
38
39
40
41
#from matplotlib.backends.backend_pgf import FigureCanvasPgf
#mpl.backend_bases.register_backend('pdf', FigureCanvasPgf)
#TEX_PARAMS = {
#    "text.usetex": True,            # use LaTeX to write all text
#    "pgf.rcfonts": False,           # Ignore Matplotlibrc
#    "pgf.texsystem": "lualatex",  # hoping to avoid memory issues
#    "pgf.preamble": [
#        r'\usepackage{color}'     # xcolor for colours
#    ]
#}
#mpl.rcParams.update(TEX_PARAMS)
42
import seaborn as sns
43
44
45
46
47
# from rpy2.robjects import r, pandas2ri, Formula, StrVector
# as_df = r("as.data.frame")
# from rpy2.rinterface import RRuntimeError
# from rpy2.robjects.packages import importr
# deseq2 = importr("DESeq2")
48
from pybedtools import BedTool
49
import pyBigWig
50
import networkx as nx
Blaise Li's avatar
Blaise Li committed
51
52
53
54
55
56
57
from libworkflows import texscape


def formatwarning(
        message, category, filename, lineno, line=None):  # pylint: disable=W0613
    """Format a warning as a one-line "file:lineno: Category: message".

    Meant to replace *warnings.formatwarning*. *line* is accepted but
    ignored; it now defaults to None, matching the standard library
    signature so the function can also be called with four arguments.
    """
    return "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
58
59


Blaise Li's avatar
Blaise Li committed
60
61
62
63
64
65
# Use the custom one-line formatter for every warning issued via warnings.warn.
warnings.formatwarning = formatwarning


# This might represent any type of genomic interval.
class Exon():
    """Object representing an exon: a half-open interval
    [*start*, *end*) on chromosome *chrom*."""
    __slots__ = ("chrom", "start", "end")

    def __init__(self, chrom, start, end):
        self.chrom = chrom
        self.start = start
        self.end = end

    def __eq__(self, other):
        # Value-based equality (and the matching hash below) so that the
        # same exon annotated for several transcripts deduplicates when
        # used as a graph node or set member; previously identity-based.
        if not isinstance(other, Exon):
            return NotImplemented
        return (self.chrom, self.start, self.end) == (
            other.chrom, other.start, other.end)

    def __hash__(self):
        return hash((self.chrom, self.start, self.end))

    def __repr__(self):
        return f"Exon({self.chrom!r}, {self.start!r}, {self.end!r})"

    def overlap(self, other):
        """
        Tell whether *self* and *other* overlap.
        """
        if self.chrom != other.chrom:
            return False
        return (self.start <= other.start < self.end) or (
            other.start <= self.start < other.end)

    def merge(self, other):
        """
        Create a new Exon object by merging *self* with *other*.
        """
        # No overlap assertion: merged exons may be only indirectly
        # linked through a chain of pairwise-overlapping exons.
        return Exon(
            self.chrom,
            min(self.start, other.start),
            max(self.end, other.end))

    def __len__(self):
        # Length in bases of the half-open interval.
        return self.end - self.start

Blaise Li's avatar
Blaise Li committed
91
92
# Unbound-method aliases, used as plain functions when grouping
# (quotient_graph) and merging (reduce) exons.
OVERLAP = Exon.overlap
MERGE = Exon.merge
93

Blaise Li's avatar
Blaise Li committed
94
class Gene():
    """This object contains information obtained from a gtf file.

    Exons are stored as nodes of an edge-less ``networkx`` graph so that
    overlapping exons can later be grouped with ``nx.quotient_graph``
    when computing the "union exon" length.
    """
    __slots__ = ("gene_id", "exons", "union_exon_length")
    def __init__(self, gene_id):
        # Gene identifier, as found in the gtf "gene_id" attribute.
        self.gene_id = gene_id
        #self.transcripts = {}
        # Graph whose nodes are Exon objects; no edges are ever added.
        self.exons = nx.Graph()
        # Computed lazily by set_union_exon_length.
        self.union_exon_length = None

    #def add_transcript(self, feature):
    #    the_id = feature.attrs["transcript_id"]
    #    assert the_id not in self.transcripts
    #    self.transcripts[the_id] = feature

    def add_exon(self, feature):
        """
        Add one Exon object to the exon graph based in the information in gtf
        information *feature*.
        """
        #the_id = feature.attrs["exon_id"]
        #assert the_id not in self.exons
        #self.exons[the_id] = feature
        exon = Exon(feature.chrom, feature.start, feature.end)
        # NOTE(review): this membership test uses Exon's equality
        # semantics; duplicates only collapse here if Exon defines
        # value-based __eq__/__hash__. Either way, the later merging
        # step keeps the union length correct.
        if exon not in self.exons:
            self.exons.add_node(exon)

    # The merging cannot be done on the full BedTool because we dont want
    # to merge together exons not belonging to the same gene.
    def set_union_exon_length(self):
        """Compute the "union exon length": the number of bases covered
        by at least one exon of the gene, overlapping exons being merged
        before summing. The result is stored in *self.union_exon_length*."""
        if len(self.exons) == 1:
            # No need to merge when there is only one exon
            self.union_exon_length = len(next(iter(self.exons.nodes())))
        else:
            # Too slow
            #self.union_exon_length = sum(map(
            #    len, BedTool(self.exons.values()).merge().features()))
            #self.union_exon_length = 0
            # We group nodes that overlap, and merge them
            #overlapping_exons = nx.quotient_graph(self.exons, OVERLAP)
            #for node in overlapping_exons.nodes():
            #    mex = reduce(MERGE, node)
            #    self.union_exon_length += len(mex)
            # NOTE(review): nx.quotient_graph treats OVERLAP as an
            # equivalence relation, but overlap is not transitive —
            # confirm grouping is correct for chains of exons that only
            # overlap pairwise.
            self.union_exon_length = sum((len(reduce(
                MERGE, node)) for node in nx.quotient_graph(
                    self.exons, OVERLAP).nodes()))
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167


def gtf_2_genes_exon_lengths(gtf_filename):
    """Returns a pandas DataFrame where union exon lengths are associated to gene IDs.

    *gtf_filename* is the name of a gtf file; only its "exon" features
    are used. The "union exon length" of a gene is the number of bases
    covered by at least one of its exons, overlaps counted once.
    """
    genes = {}
    # Context manager so the gtf file is closed even on error
    # (the previous version leaked the file handle).
    with open(gtf_filename, "r") as gtf_file:
        gtf = BedTool(gtf_file)
        for feature in gtf.features():
            feat_type = feature[2]
            if feat_type != "exon":
                continue
            attrs = feature.attrs
            gene_id = attrs["gene_id"]
            if gene_id not in genes:
                genes[gene_id] = Gene(gene_id)
            # A given exon may be registered for several transcripts,
            # hence several gtf entries; duplicates are handled by
            # Gene.add_exon and the merging in set_union_exon_length.
            genes[gene_id].add_exon(feature)
    for gene in genes.values():
        gene.set_union_exon_length()
    return pd.DataFrame(pd.Series(
        {gene.gene_id: gene.union_exon_length for gene in genes.values()},
        name=("union_exon_len")).rename_axis("gene"))
169
170
171


def repeat_bed_2_lengths(repeat_bed):
    """Computes the lengths of repetitive elements in a bed file, grouped by families.

    This assumes that the elements have their names composed of the family name,
    then a colon, then a number. For instance:
    Simple_repeat|Simple_repeat|(TTTTTTG)n:1
    Simple_repeat|Simple_repeat|(TTTTTTG)n:2
    Simple_repeat|Simple_repeat|(TTTTTTG)n:3
    Simple_repeat|Simple_repeat|(TTTTTTG)n:4
    -> Simple_repeat|Simple_repeat|(TTTTTTG)n
    Returns a DataFrame associating the summed lengths to the family names.
    """
    # Keep columns 1 (start), 2 (end) and 3 (name); the name column
    # (index 2 among the selected ones) becomes the index.
    coords = pd.read_table(
        repeat_bed, usecols=[1, 2, 3], header=None, index_col=2)
    # Element lengths, bed convention: end - start.
    lengths = coords[2] - coords[1]
    lengths.name = "union_exon_len"
    # Strip the trailing ":<number>" to recover the family name.
    families = [":".join(name.split(":")[:-1]) for name in coords.index]
    # The reads assigned to a repeated element can come from any member
    # of the family, so lengths are summed per family. The grouping
    # column is called "gene" for convenience and compatibility.
    return pd.DataFrame(lengths).assign(gene=families).groupby("gene").sum()
193

Blaise Li's avatar
Blaise Li committed
194

195
196
def spikein_gtf_2_lengths(spikein_gtf):
    """Computes the lengths of spike-ins, grouped by families.
    Returns a DataFrame associating the summed lengths to the spike-in names.
    """
    # Field 0 of each gtf line holds the spike-in name, field 4 its end
    # coordinate (which equals its length, the start being 1).
    with open(spikein_gtf) as gtf_file:
        ends = {
            fields[0]: int(fields[4])
            for fields in (
                line.strip().split("\t") for line in gtf_file)}
    return pd.DataFrame(
        pd.Series(ends, name=("union_exon_len")).rename_axis("gene"))


Blaise Li's avatar
Blaise Li committed
209
210
211
def id_list_gtf2bed(
        identifiers, gtf_filename,
        feature_type="transcript", id_kwd="gene_id"):
    """
    Extract bed coordinates of an iterable of identifiers from a gtf file.

    *identifiers* is the iterable of identifiers.
    *gtf_filename* is the name of the gtf file.
    *feature_type* is the type of feature to be considered
    in the gtf file (third columns).
    *id_kwd* is the keyword under which the feature ID is expected to be found
    in the feature annotations in the gtf_file. These feature IDs will be
    matched against the elements in *identifiers*.
    """
    if not identifiers:
        # Empty or falsy *identifiers*: return an empty generator.
        # https://stackoverflow.com/a/13243870/1878788
        def empty_bed_generator():
            return
            yield  # pylint: disable=W0101
        return empty_bed_generator()
    wanted = set(identifiers)

    def feature_filter(feature):
        """Keep features of the requested type whose ID is wanted."""
        return feature[2] == feature_type and feature[id_kwd] in wanted
    return BedTool(gtf_filename).filter(feature_filter)
236
237


238
def make_empty_bigwig(filename, chrom_sizes):
    """Writes *filename* so that it is an empty bigwig file.
    *chrom_sizes* is a dictionary giving chromosome sizes given
    chromosome names.
    """
    bigwig = pyBigWig.open(filename, "w")
    bigwig.addHeader(list(chrom_sizes.items()))
    for (chrom, chrom_len) in chrom_sizes.items():
        # One zero-valued entry every 10 bases along the chromosome.
        zeroes = np.zeros(chrom_len)[0::10]
        bigwig.addEntries(
            chrom, 0,
            values=np.nan_to_num(zeroes),
            span=10, step=10)
    bigwig.close()


254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
# Possible improvements: use an iterable of (from_region, to_region) pairs,
# or two zippable iterables
def paste_bigwig_region(
        from_fname, to_fname, from_region, to_region,
        dest_fname, nanmean=False):
    """
    Take values from a region *from_region* in a bigwig file *from_fname* and
    paste them into a region *to_region* in another bigwig file *to_fname*, and
    write the results in a third bigwig file *dest_fname*.

    A region should be specified as a (chrom, start, stop) triplet, where start
    is zero-based and stop is 1-based (like BED coordinates or Python slices).

    *dest_fname* will have the same chromosomes as *to_fname*.
    The values in *to_region* are substituted.

    The output is re-binned with a fixed span and step of 10 bases.

    Current limitation: The regions should have the same length.
    Option *nanmean* is likely not working correctly.
    """
    (from_chrom, from_start, from_stop) = from_region
    from_len = from_stop - from_start
    (to_chrom, to_start, to_stop) = to_region
    assert to_stop - to_start == from_len, (
        "Regions should have the same lengths.")
    from_bw = pyBigWig.open(from_fname)
    to_bw = pyBigWig.open(to_fname)
    chrom_sizes = list(to_bw.chroms().items())
    dest_bw = pyBigWig.open(dest_fname, "w")
    dest_bw.addHeader(chrom_sizes)
    for (chrom, chrom_len) in chrom_sizes:
        # Number of 10-base bins needed to cover the chromosome.
        nb_bins = ceil(chrom_len / 10)
        # values = to_bw.values(chrom, 0, chrom_len)
        # Original values, plus some nans for binned_statistics to properly set the bin boundaries
        values = np.pad(
            to_bw.values(chrom, 0, chrom_len),
            # pad zero on the left, and what is needed to complete the bin on the right
            (0, 10 * nb_bins - chrom_len),
            constant_values=np.nan)
        if chrom == to_chrom:
            # Replace the values in the desired region
            values[to_start:to_stop] = from_bw.values(
                from_chrom, from_start, from_stop)
        if nanmean:
            # np.nanmean ignores the nan padding, but also any nan inside
            # the real data — presumably why this option is suspect.
            bin_means = binned_statistic(
                range(0, 10 * nb_bins), values,
                statistic=np.nanmean, bins=nb_bins).statistic
        else:
            bin_means = binned_statistic(
                # range(0, chrom_len), np.nan_to_num(values),
                range(0, 10 * nb_bins), values,
                statistic="mean", bins=nb_bins).statistic
        dest_bw.addEntries(
            chrom, 0,
            # Mean for each bin of size 10
            values=bin_means,
            # values=np.nan_to_num(np.zeros(chrom_len)[0::10]),
            span=10, step=10)
    dest_bw.close()
    to_bw.close()
    from_bw.close()


317
318
319
#################
# Bowtie2 stuff #
#################
Blaise Li's avatar
Blaise Li committed
320
321
def zero(value):  # pylint: disable=W0613
    """Constant zero, whatever the *value*."""
    return 0


def identity(value):
    """Return *value* unchanged."""
    return value


# Maps the function codes used in bowtie2 "-i" option values to the
# corresponding Python functions (C: constant, L: linear, S: square
# root, G: natural log).
BOWTIE2_FUNCTION_SELECTOR = {
    "C": zero,
    "L": identity,
    "S": sqrt,
    "G": log}


def make_seeding_function(seeding_string):
    """Generates a function that computes the seeding pattern given a
    string representing bowtie2 seeding settings (-L and -i options).

    >>> make_seeding_function("-L 6 -i S,1,0.8")(18)
    [[0, 6], [4, 10], [8, 14], [12, 18]]
    """
    # The two options may appear in either order.
    [opt_a, val_a, opt_b, val_b] = seeding_string.split()
    if opt_a == "-L":
        assert opt_b == "-i"
        (seed_len, interval_string) = (int(val_a), val_b)
    else:
        assert opt_b == "-L"
        (seed_len, interval_string) = (int(val_b), val_a)
    # "-i" values look like "S,1,0.8": function code, constant, coefficient.
    (func_code, const_str, coeff_str) = interval_string.split(",")
    interval_func = BOWTIE2_FUNCTION_SELECTOR[func_code]
    constant = float(const_str)
    coeff = float(coeff_str)

    def seeding_function(read_len):
        """Return the list of [start, end) seed intervals for *read_len*."""
        # Distance between consecutive seed starts, as bowtie2 computes it.
        step = floor(constant + (coeff * interval_func(read_len)))
        seeds = []
        (start, end) = (0, seed_len)
        while end <= read_len:
            seeds.append([start, end])
            start += step
            end = start + seed_len
        return seeds
    return seeding_function


370
371
372
373
374
375
def aligner2min_mapq(aligner, wildcards):
    """
    Option to filter on MAPQ value in featureCounts.

    What minimal MAPQ value should a read have to be considered uniquely mapped?
    See <https://sequencing.qcfail.com/articles/mapq-values-are-really-useful-but-their-implementation-is-a-mess/>.

    The mapping type is taken from the first of the *wildcards*
    attributes "mapping_type", "mapped_type" or "read_type" that exists
    and is not None. The MAPQ filter only applies when the mapping type
    is unknown or starts with "unique_".
    """  # pylint: disable=C0301
    mapping_type = None
    # The relevant wildcard name varies between workflows; try them in
    # order instead of chaining three copy-pasted try/except blocks.
    for attr_name in ("mapping_type", "mapped_type", "read_type"):
        mapping_type = getattr(wildcards, attr_name, None)
        if mapping_type is not None:
            break
    if mapping_type is None or mapping_type.startswith("unique_"):
        if aligner == "hisat2":
            return "-Q 60"
        if aligner == "bowtie2":
            return "-Q 23"
        raise NotImplementedError(f"{aligner} not handled (yet?)")
    return ""
399
400


Blaise Li's avatar
Blaise Li committed
401
# Not sure this is a good idea...
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
# def masked_gmean(a, axis=0, dtype=None):
#     """Modified from stats.py."""
#     # Converts the data into a masked array
#     ma = np.ma.masked_invalid(a)
#     # Apply gmean
#     if not isinstance(ma, np.ndarray):
#         # if not an ndarray object attempt to convert it
#         log_a = np.log(np.array(ma, dtype=dtype))
#     elif dtype:
#         # Must change the default dtype allowing array type
#         if isinstance(ma, np.ma.MaskedArray):
#             log_a = np.log(np.ma.asarray(ma, dtype=dtype))
#         else:
#             log_a = np.log(np.asarray(ma, dtype=dtype))
#     else:
#         log_a = np.log(ma)
#     return np.exp(log_a.mean(axis=axis))


Blaise Li's avatar
Blaise Li committed
421
def median_ratio_to_pseudo_ref_size_factors(counts_data):
    """Adapted from DESeq paper (doi:10.1186/gb-2010-11-10-r106)
    All libraries are used to define a pseudo-reference, which has
    the geometric mean across libraries for a given gene in *counts_data*.
    For a given library, the median across genes of the ratios to the
    pseudo-reference is used as size factor."""
    # A zero count in any library would force the geometric mean to
    # zero, so only genes with non-zero counts everywhere are used
    # (may be bad for IP data, where many zeroes are expected).
    all_nonzero = counts_data[counts_data.prod(axis=1) > 0]
    pseudo_ref = all_nonzero.apply(gmean, axis=1)

    def median_ratio_to_pseudo_ref(col):
        """Median across genes of this library's ratios to the reference."""
        return (col / pseudo_ref).median()
    median_ratios = all_nonzero.apply(median_ratio_to_pseudo_ref, axis=0)
    # Not sure fillna(0) is appropriate
    if any(median_ratios.isna()):
        msg = "Could not compute median ratios to pseudo reference.\n"
        warnings.warn(msg)
        return median_ratios.fillna(1)
    return median_ratios
Blaise Li's avatar
Blaise Li committed
445

446
447
448
449
450
451
452
453
454

def size_factor_correlations(counts_data, summaries, normalizer):
    """Is there a correlation, across libraries, between normalized values and size factors?
    The size factor type *normalizer* is either computed or taken from *summaries*.
    The normalized data are computed by dividing *counts_data* by this size factor."""
    if normalizer == "median_ratio_to_pseudo_ref":
        size_factors = median_ratio_to_pseudo_ref_size_factors(counts_data)
    else:
        size_factors = summaries.loc[normalizer]
    normalized = counts_data / size_factors

    def compute_pearsonr_with_size_factor(row):
        """Pearson correlation of one gene's normalized values with the size factors."""
        return pearsonr(row, size_factors)[0]
    return normalized.apply(compute_pearsonr_with_size_factor, axis=1)


463
def plot_norm_correlations(correlations):
    """
    Make violin plots to represent data in *correlations*.
    """
    if mpl.rcParams.get("text.usetex", False):
        # Column names must be LaTeX-escaped before plotting.
        correlations.columns = [
            texscape(colname) for colname in correlations.columns]
    axis = sns.violinplot(data=correlations, cut=0)
    axis.set_ylabel("Pearson correlation coefficient")
476
477
478


def plot_counts_distribution(data, xlabel):
    """
    Plot a kernel density estimate of the distribution of counts in *data*.
    """
    # TODO: try to plot with semilog x axis
    if mpl.rcParams.get("text.usetex", False):
        # LaTeX-escape the label and column names before plotting.
        xlabel = texscape(xlabel)
        data.columns = [texscape(colname) for colname in data.columns]
    try:
        axis = data.plot.kde()
    except ValueError:
        msg = "".join([
            "There seems to be a problem with the data.\n",
            "The data matrix has %d lines and %d columns.\n" % (
                len(data), len(data.columns))])
        warnings.warn(msg)
        raise
    axis.set_xlabel(xlabel)
500
501


502
def plot_histo(outfile, data, title=None):
    """
    Plot a histogram of *data* in file *outfile*.

    *data* is iterated row-wise, one bar per row, with the index on the
    x axis (presumably read lengths — confirm with callers).
    *title*, if given, becomes the figure title.
    """
    fig = plt.figure(figsize=(15, 7))
    axis = fig.add_subplot(111)
    # Half-a-bar margin on each side of the index range.
    axis.set_xlim([data.index[0] - 0.5, data.index[-1] + 0.5])
    # axis.set_ylim([0, 100])
    bar_width = 0.8
    # letter2legend = dict(zip("ACGT", "ACGT"))
    usetex = mpl.rcParams.get("text.usetex", False)
    if usetex:
        data.columns = [texscape(colname) for colname in data.columns]
        # NOTE(review): texscape(title) is called even when *title* is
        # None — likely to fail under usetex with the default title;
        # confirm callers always pass a title in that configuration.
        title = texscape(title)
    for (read_len, count) in data.iterrows():
        plt.bar(
            read_len,
            count,
            align="center",
            width=bar_width)
            # color=letter2colour[letter],
            # label=letter2legend[letter])
    axis.legend()
    axis.set_xticks(data.index)
    axis.set_xticklabels(data.index)
    axis.set_xlabel("read length")
    axis.set_ylabel("number of reads")
    plt.setp(axis.get_xticklabels(), rotation=90)
    if title is not None:
        plt.title(title)
    ## debug
    try:
        plt.savefig(outfile)
    # except RuntimeError as e:
    except RuntimeError:
        print(data.index)
        print(title)
        raise
    ##
541
542


543
def plot_boxplots(data, ylabel):
    """
    Plot boxplots of data in *data* using *ylabel* as y-axis label.
    """
    fig = plt.figure(figsize=(6, 12))
    axis = fig.add_subplot(111)
    if mpl.rcParams.get("text.usetex", False):
        # LaTeX-escape the label and column names before plotting.
        ylabel = texscape(ylabel)
        data.columns = [texscape(colname) for colname in data.columns]
    data.plot.box(ax=axis)
    axis.set_ylabel(ylabel)
    for tick_label in axis.get_xticklabels():
        tick_label.set_rotation(90)
    plt.tight_layout()
558
559


560
561
562
############
# DE stuff #
############
563
564
565
566
567
568
# Cutoffs in log fold change
LFC_CUTOFFS = [0.5, 1, 2]
# Status labels derived from the cutoffs: "up0.5", "up1", "up2" (and the
# "down" counterparts), as produced by status_setter below.
UP_STATUSES = [f"up{cutoff}" for cutoff in LFC_CUTOFFS]
DOWN_STATUSES = [f"down{cutoff}" for cutoff in LFC_CUTOFFS]


569
570
571
def status_setter(lfc_cutoffs=None, fold_type="log2FoldChange"):
    """*fold_type* can also be "lfcMLE", which is based on uncorrected values.
    This may not be good for genes with low expression levels."""
    if lfc_cutoffs is None:
        lfc_cutoffs = LFC_CUTOFFS
    # Highest cutoffs first, so the first match is the most extreme status.
    desc_cutoffs = sorted(lfc_cutoffs, reverse=True)

    def set_status(row):
        """Determines the up- or down-regulation status corresponding to a given
        row of a deseq2 results table."""
        # NaN adjusted p-values compare False, hence count as "NS".
        if not row["padj"] < 0.05:
            return "NS"
        lfc = row[fold_type]
        if lfc > 0:
            for cutoff in desc_cutoffs:
                if lfc > cutoff:
                    return f"up{cutoff}"
            return "up"
        for cutoff in desc_cutoffs:
            if lfc < -cutoff:
                return f"down{cutoff}"
        return "down"
    return set_status
Blaise Li's avatar
Blaise Li committed
593

594
595
596
597
598
599
600

# res = res.assign(is_DE=res.apply(set_de_status, axis=1))
def set_de_status(row):
    """Determines whether a gene is differentially expressed (DE) of not (NS)
    based on the adjusted p-value in row of a deseq2 results table."""
    # NaN adjusted p-values compare False, hence count as "NS".
    return "DE" if row["padj"] < 0.05 else "NS"
602
603
# Colours used for the DE/NS statuses in MA-plots.
DE2COLOUR = {
    # black
    "DE": "k",
    # pale grey
    "NS": "0.85"}
607
608


609
610
611
612
def plot_lfc_distribution(res, contrast, fold_type=None):
    """*fold_type* is "log2FoldChange" by default.
    It can also be "lfcMLE", which is based on uncorrected values.
    This may not be good for genes with low expression levels."""
    if fold_type is None:
        fold_type = "log2FoldChange"
    lfc = getattr(res, fold_type).dropna()
    # The contrast is used as series name, hence as legend label.
    if mpl.rcParams.get("text.usetex", False):
        lfc.name = texscape(contrast)
    else:
        lfc.name = contrast
    axis = sns.kdeplot(lfc)
    axis.set_xlabel(fold_type)
    axis.set_ylabel("frequency")
626
627
628


def make_status2colour(down_statuses, up_statuses):
    """
    Generate a dictionary associating colours to statuses.
    """
    # Order the statuses from most down to most up, so the diverging
    # "coolwarm" palette maps blue to down and red to up.
    statuses = [*reversed(down_statuses), "down", "NS", "up", *up_statuses]
    palette = sns.color_palette("coolwarm", len(statuses))
    return dict(zip(statuses, palette))


636
# Default status-to-colour mapping, used by plot_MA when no explicit
# group2colour is given.
STATUS2COLOUR = make_status2colour(DOWN_STATUSES, UP_STATUSES)
Blaise Li's avatar
Blaise Li committed
637
638


639
640
641
642
643
# TODO: use other labelling than logfold or gene lists, i.e. biotype
def plot_MA(res,
            grouping=None,
            group2colour=None,
            mean_range=None,
            lfc_range=None,
            fold_type=None):
    """
    Draw an MA-plot (log10 mean expression vs log fold change) from the
    deseq2 results table *res*.

    *grouping* can be the name of a column of *res* (points are then
    coloured per value of that column using *group2colour*, a dict
    defaulting to STATUS2COLOUR), or an iterable of gene identifiers
    (then *group2colour* must be a (label, colour) pair for that list).
    *mean_range* and *lfc_range* optionally restrict the x and y axes.
    *fold_type* is "log2FoldChange" by default.
    It can also be "lfcMLE", which is based on uncorrected values.
    This may not be good for genes with low expression levels.
    """
    # bool(res) raises ValueError for a DataFrame ("the truth value of a
    # DataFrame is ambiguous"), so emptiness must be tested through len.
    if not len(res):
        raise ValueError("No data to plot.")
    _, axis = plt.subplots()
    # Make a column indicating whether the gene is DE or NS
    data = res.assign(is_DE=res.apply(set_de_status, axis=1))
    x_column = "baseMean"
    # The x axis shows log10 of the mean expression level.
    data = data.assign(logx=np.log10(data[x_column]))
    if fold_type is None:
        y_column = "log2FoldChange"
    else:
        y_column = fold_type
    usetex = mpl.rcParams.get("text.usetex", False)

    def scatter_group(group, label, colour, size=1):
        """Plots the data in *group* on the scatterplot."""
        if usetex:
            label = texscape(label)
        group.plot.scatter(
            x="logx",
            y=y_column,
            s=size,
            c=colour,
            label=label, ax=axis)
    if usetex:
        data.columns = [texscape(colname) for colname in data.columns]
        y_column = texscape(y_column)
        de_status_column = "is\_DE"  # pylint: disable=W1401
    else:
        de_status_column = "is_DE"
    # First plot the data in grey and black
    for (de_status, group) in data.groupby(de_status_column):
        label = f"{de_status} ({len(group)})"
        colour = DE2COLOUR[de_status]
        scatter_group(group, label, colour, size=2)
    if grouping is not None:
        if isinstance(grouping, str):
            # Overlay colours based on the "grouping" column
            if group2colour is None:
                group2colour = STATUS2COLOUR
            for status, group in data.groupby(grouping):
                label = f"{status} ({len(group)})"
                colour = group2colour[status]
                scatter_group(group, label, colour)
        else:
            # *grouping* is a gene list; *group2colour* provides its
            # label and colour.
            (status, colour) = group2colour
            row_indices = data.index.intersection(grouping)
            try:
                label = f"{status} ({len(row_indices)})"
                # .loc replaces the DataFrame.ix accessor removed in
                # pandas 1.0.
                scatter_group(data.loc[row_indices], label, colour)
            except ValueError as err:
                if str(err) != "scatter requires x column to be numeric":
                    print(data.loc[row_indices])
                    raise
                warnings.warn(f"Nothing to plot for {status}\n")
    # Dashed horizontal guides at 2-fold change.
    axis.axhline(y=1, linewidth=0.5, color="0.5", linestyle="dashed")
    axis.axhline(y=-1, linewidth=0.5, color="0.5", linestyle="dashed")
    # TODO: check data basemean range
    if mean_range is not None:
        # The x axis is in log10 units.
        axis.set_xlim(np.log10(mean_range))
    if lfc_range is not None:
        (lfc_min, lfc_max) = lfc_range
        lfc_here_min = getattr(data, y_column).min()
        lfc_here_max = getattr(data, y_column).max()
        if (lfc_here_min < lfc_min) or (lfc_here_max > lfc_max):
            warnings.warn(
                f"Cannot plot {y_column} data "
                f"([{lfc_here_min}, {lfc_here_max}]) in requested range "
                f"([{lfc_min}, {lfc_max}])\n")
        else:
            axis.set_ylim(lfc_range)
    # Label x ticks as powers of ten.
    # https://stackoverflow.com/a/24867320/1878788
    x_ticks = np.arange(
        floor(np.ma.masked_invalid(data["logx"]).min()),
        ceil(np.ma.masked_invalid(data["logx"]).max()),
        1)
    x_ticklabels = [r"$10^{{{}}}$".format(tick) for tick in x_ticks]
    plt.xticks(x_ticks, x_ticklabels)
    axis.set_xlabel(x_column)
731

732

Blaise Li's avatar
Blaise Li committed
733
734
735
736
737
738
739
def plot_scatter(data,
                 x_column,
                 y_column,
                 regression=False,
                 grouping=None,
                 group2colour=None,
                 x_range=None,
                 y_range=None,
                 axes_style=None):
    """
    Plot a scatterplot of *y_column* against *x_column* from the pandas
    DataFrame *data*.

    All points are first drawn in translucent grey; *grouping* can then
    overlay coloured points:

    - If *grouping* is a column name (str), points are grouped and
      coloured by the values of that column. *group2colour* may be a
      dict mapping group values to colours; by default a seaborn
      "colorblind" palette is generated.
    - Otherwise *grouping* is taken as a collection of index labels to
      highlight, and *group2colour* must be a ``(label, colour)`` pair
      used for these points.

    If *regression* is True, a linear regression is computed on the
    NaN-filtered data and the fitted line is drawn (dashed grey).

    *x_range* and *y_range* are optional ``(min, max)`` pairs used to
    set the axis limits; if the data does not fit in a requested range,
    a warning is emitted and that range is ignored (so that no point is
    silently cropped out of view).

    *axes_style* is a dict of keyword arguments for the reference lines
    drawn at x=0 and y=0 (thin dashed grey by default).

    Returns the matplotlib Axes object.
    Raises ValueError if *data* is empty.
    """
    # ``not data`` is ambiguous for a DataFrame (pandas raises);
    # ``.empty`` is the explicit emptiness test.
    if data.empty:
        raise ValueError("No data to plot.")
    # The Figure object is not needed afterwards, only the Axes.
    _, axis = plt.subplots()
    # Background layer: all points in translucent grey.
    data.plot.scatter(
        x=x_column, y=y_column,
        s=2, c="black", alpha=0.15, edgecolors='none',
        ax=axis)
    if regression:
        # Drop incomplete pairs, then regress y on x explicitly
        # (the single-argument form of linregress is deprecated).
        clean = data[[x_column, y_column]].dropna()
        linreg = linregress(clean[x_column], clean[y_column])
        slope = linreg.slope
        intercept = linreg.intercept

        def fit(x):
            """Value of the fitted line at *x*."""
            return (slope * x) + intercept
        # Draw the fitted line across the full x extent of the data.
        min_x = data[x_column].min()
        max_x = data[x_column].max()
        axis.plot(
            (min_x, max_x), (fit(min_x), fit(max_x)),
            linewidth=0.5, color="0.5", linestyle="dashed")
    # Overlay colour points
    if grouping is not None:
        if isinstance(grouping, str):
            # Determine colours based on the "grouping" column
            if group2colour is None:
                statuses = data[grouping].unique()
                group2colour = dict(zip(
                    statuses,
                    sns.color_palette("colorblind", len(statuses))))
            for (status, group) in data.groupby(grouping):
                group.plot.scatter(
                    x=x_column, y=y_column, s=1, c=group2colour[status],
                    label=f"{status} ({len(group)})", ax=axis)
        else:
            # Apply a single colour to an explicit list of index labels.
            (status, colour) = group2colour
            row_indices = data.index.intersection(grouping)
            try:
                # .loc replaces the DataFrame.ix indexer removed from
                # pandas.
                data.loc[row_indices].plot.scatter(
                    x=x_column, y=y_column, s=1, c=colour,
                    label=f"{status} ({len(row_indices)})", ax=axis)
            except ValueError as err:
                # Only swallow the specific "nothing numeric to plot"
                # error; anything else is a real failure.
                if str(err) != "scatter requires x column to be numeric":
                    print(data.loc[row_indices])
                    raise
                warnings.warn(f"Nothing to plot for {status}\n")
    if axes_style is None:
        axes_style = {"linewidth": 0.5, "color": "0.5", "linestyle": "dashed"}
    # Reference lines through the origin.
    axis.axhline(y=0, **axes_style)
    axis.axvline(x=0, **axes_style)
    # Set axis limits, warning instead of cropping data out of view.
    if x_range is not None:
        (x_min, x_max) = x_range
        x_here_min = data[x_column].min()
        x_here_max = data[x_column].max()
        if (x_here_min < x_min) or (x_here_max > x_max):
            warnings.warn(
                f"Cannot plot {x_column} data "
                f"([{x_here_min}, {x_here_max}]) in requested range "
                f"([{x_min}, {x_max}])\n")
        else:
            axis.set_xlim(x_range)
    if y_range is not None:
        (y_min, y_max) = y_range
        y_here_min = data[y_column].min()
        y_here_max = data[y_column].max()
        if (y_here_min < y_min) or (y_here_max > y_max):
            warnings.warn(
                f"Cannot plot {y_column} data ([{y_here_min}, {y_here_max}]) "
                f"in requested range ([{y_min}, {y_max}])\n")
        else:
            axis.set_ylim(y_range)
    return axis
828

829

830
def plot_paired_scatters(data, columns=None, hue=None, log_log=False):
    """
    Alternative to pairplot, in order to avoid histograms on the diagonal.

    *data* is a pandas DataFrame whose columns are plotted against each
    other in a grid of pairwise scatterplots (seaborn PairGrid).
    *columns* restricts the grid to a subset of the columns
    (all columns by default).
    *hue* is forwarded to PairGrid to colour points by that column.
    If *log_log* is True, both axes of every subplot use a log scale.
    """
    if columns is None:
        columns = data.columns
    # When matplotlib renders text through LaTeX, column names must be
    # escaped so special characters do not break LaTeX compilation.
    usetex = mpl.rcParams.get("text.usetex", False)
    if usetex:
        # NOTE(review): this mutates the caller's *data* column labels
        # in place — confirm this is intended.
        data.columns = [texscape(colname) for colname in data.columns]
        columns = [texscape(colname) for colname in columns]
    grid = sns.PairGrid(data, vars=columns, hue=hue, size=8)
    # grid.map_offdiag(plt.scatter, marker=".")
    # Only the lower triangle is drawn, avoiding redundant mirrored
    # panels and the diagonal histograms of pairplot.
    grid.map_lower(plt.scatter, marker=".")
    if log_log:
        for axis in grid.axes.ravel():
            axis.set_xscale('log')
            axis.set_yscale('log')
    grid.add_legend()