Commit 87894c3e authored by amichaut's avatar amichaut

Various bug fixes in Napari-associated methods

parent f3633059
......@@ -6,6 +6,7 @@
# Track Analyzer :microscope: :bar_chart:
Quantification and visualization of tracking data
**Track Analyzer** is a Python-based data visualization pipeline for tracking data.
It *does not* perform any tracking, but visualizes and quantifies any kind of tracked data.
......@@ -45,7 +46,7 @@ To install **Track Analyzer**, just run on a Terminal (Mac & Linux) or open an A
```sh
conda create -n pyTA python=3.7
conda activate pyTA
pip install track-analyzer
pip install track-analyzer
```
### Installation with a virtualenv
......
......@@ -1573,4 +1573,22 @@ def filter_by_ROI(df, image, filter_all_frames=False, return_ROIs=False):
else:
return df_out
def batch_analysis(dirdata, run_='cell_analysis', refresh=False, invert_yaxis=True):
dirdata_l = tpr.listdir_nohidden(dirdata)
dirdata_l_ = []
for d in dirdata_l:
dirdata_ = osp.join(dirdata, d)
if osp.isdir(dirdata_) and d != 'outdata':
dirdata_l_.append(d)
for i, d in enumerate(dirdata_l_):
dirdata_ = osp.join(dirdata, d)
print("processing directory {} ({}/{})".format(d, i + 1, len(dirdata_l_)))
if run_ == 'cell_analysis':
cell_analysis(dirdata_, no_bkg=True, show_axis=True, plot_vel=None, min_traj_len=10, plot_vs_Y=True,
dont_plot_cells=True, refresh=refresh, invert_yaxis=invert_yaxis)
# analysis_func(dirdata_,no_bkg=True,show_axis=True,plot_vel=None,min_traj_len=10,plot_vs_Y=True,dont_plot_cells=True,dont_set_origin=True)
elif run_ == 'pooled_MSD':
df, lengthscale, timescale, columns, dim = tpr.get_data(dirdata_)
plot_pooled_MSD(dirdata_, dt=timescale, plot_method='along_Y')
\ No newline at end of file
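A minimal usage sketch for the new `batch_analysis` helper. The import path and the directory layout below are assumptions (the function simply loops over the sub-directories of `dirdata`, skipping `outdata`):

```python
# Hedged sketch: run the per-movie analysis on every sub-directory of an experiment folder.
# 'track_analyzer.analysis' and 'experiment/' are assumed names; adapt them to your install.
from track_analyzer import analysis as tan

# each sub-directory of 'experiment/' (except 'outdata') holds one movie's tracking data
tan.batch_analysis('experiment/', run_='cell_analysis', refresh=False, invert_yaxis=True)
```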
......@@ -879,42 +879,42 @@ def view_traj(df, image=None, z_step=1):
:type z_step: float or None
"""
with napari.gui_qt():
axis_labels = ['t', 'z', 'x', 'y'] if 'z' in df.columns else ['t', 'x', 'y']
viewer = napari.Viewer(axis_labels=axis_labels)
# if there is an image to plot on
if image is not None:
if image['image_fn'] is not None:
im = io.imread(image['image_fn'])
# if 3D data
if 'z' in df.columns:
cols = ['frame', 'z', 'y', 'x']
if image['z_dim'] is None:
print("WARNING: you have 3D tracking data but your image is not a z-stack, for optimal 3D "
"viewing, use a z-stack")
viewer.add_image(im, name='image')
else:
z_step_ = 1 if z_step is None else z_step # in case z_step not given set it to 1
viewer.add_image(im, name='image', scale=(1, z_step_, 1, 1))
else:
cols = ['frame', 'y', 'x']
axis_labels = ['t', 'z', 'x', 'y'] if 'z' in df.columns else ['t', 'x', 'y']
viewer = napari.Viewer(axis_labels=axis_labels)
cols = ['frame', 'z', 'y', 'x'] if 'z' in df.columns else ['frame', 'y', 'x']
# if there is an image to plot on
if image is not None:
if image['image_fn'] is not None:
im = io.imread(image['image_fn'])
# if 3D data
if 'z' in df.columns:
if image['z_dim'] is None:
print("WARNING: you have 3D tracking data but your image is not a z-stack, for optimal 3D "
"viewing, use a z-stack")
cols = ['frame', 'y', 'x'] # restrict to 2D data
viewer.add_image(im, name='image')
else:
cols = ['frame', 'z', 'y', 'x'] if 'z' in df.columns else ['frame', 'y', 'x']
else:
z_step_ = 1 if z_step is None else z_step # in case z_step not given set it to 1
viewer.add_image(im, name='image', scale=(1, z_step_, 1, 1))
else:
viewer.add_image(im, name='image')
df = df.sort_values(by=['track', 'frame']) # napari track layer requires data to be sorted by ID then frame
df = df.sort_values(by=['track', 'frame']) # napari track layer requires data to be sorted by ID then frame
points = df[cols].values
tracks = df[['track'] + cols].values
points = df[cols].values
tracks = df[['track'] + cols].values
properties = {'time': df['t'].values, 'velocity': df['v'].values, 'acceleration': df['a'].values}
if 'z' in df.columns:
properties['z'] = df['z'].values
properties = {'time': df['t'].values, 'velocity': df['v'].values, 'acceleration': df['a'].values}
if 'z' in df.columns:
properties['z'] = df['z'].values
viewer.add_points(points, name='objects', size=1, opacity=0.3)
viewer.add_tracks(tracks, properties=properties, name='trajectories')
viewer.add_points(points, name='objects', size=1, opacity=0.3)
viewer.add_tracks(tracks, properties=properties, name='trajectories')
napari.run()
def plot_all_traj(data_dir, df, image={'image_fn': None, 't_dim': None, 'z_dim': None}, parallelize=False, dim=3,
......
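A minimal sketch of calling the reworked `view_traj`, which now launches the viewer via `napari.run()`. The helper names and paths are assumptions based on how they are used elsewhere in this commit:

```python
# Hedged sketch: show the trajectories of one position in Napari on top of a 2D movie.
from track_analyzer import prepare as tpr
from track_analyzer import plotting as tpl  # assuming view_traj lives in the plotting module

data_dir = 'experiment/position_1'  # made-up path
df, lengthscale, timescale, columns, dim = tpr.get_data(data_dir)
image = {'image_fn': 'experiment/position_1/stack.tif',  # made-up path
         't_dim': None, 'z_dim': None}  # dict shaped like get_image()'s output; only z_dim is checked here
tpl.view_traj(df, image=image)  # z_step is only needed when the image is a z-stack
```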
......@@ -26,6 +26,7 @@
import os
import os.path as osp
import csv
import time
import matplotlib.pyplot as plt
from matplotlib import cm
......@@ -141,7 +142,7 @@ def make_unit_label(dimension='L', l_unit='um', t_unit='min'):
:return: label as a Latex formatted string
:rtype: str
"""
l_unit_dict = {'um': r'\mu m', 'mm': r'mm', 'px': 'px', 'none': '', 'au': ''}
l_unit_dict = {'um': r'\mu m', 'mm': r'mm', 'm': r'm', 'px': 'px', 'none': '', 'au': ''}
t_unit_dict = {'min': r'min', 's': r's', 'frame': 'frame', 'none': '', 'au': ''}
if dimension == 'L':
......@@ -330,14 +331,26 @@ def load_dict(filename):
mydict = {}
for rows in reader:
if len(rows) > 0:
if rows[1] == '':
mydict[rows[0]] = None
else:
try:
mydict[rows[0]] = eval(rows[1]) # if needs conversion
except:
mydict[rows[0]] = rows[1] # if string
if len(rows) == 2:
if rows[1] == '':
mydict[rows[0]] = None
else:
try:
mydict[rows[0]] = eval(rows[1]) # if needs conversion
except:
mydict[rows[0]] = rows[1] # if string
elif len(rows) > 2:  # the value itself contained commas and was split by the csv reader: re-join it
val = ','.join(rows[1:])
if val == '':
mydict[rows[0]] = None
else:
try:
mydict[rows[0]] = eval(val) # if needs conversion
except:
mydict[rows[0]] = val # if string
return mydict
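A hedged illustration of what the extended `load_dict` now tolerates: a value that itself contains commas gets split into several fields by `csv.reader`, and the new branch re-joins them before the `eval` attempt. File name and content are purely illustrative:

```python
# Sketch: a two-column 'key,value' file where one value contains commas.
with open('info.csv', 'w', newline='') as f:   # 'info.csv' is a made-up name
    f.write('lengthscale,0.5\n')               # -> 0.5 via eval
    f.write('timescale,\n')                    # empty value -> None
    f.write('origin,(0, 0)\n')                 # csv.reader splits this into 3 fields

d = load_dict('info.csv')  # assuming load_dict is in scope (e.g. track_analyzer.prepare)
print(d)                   # {'lengthscale': 0.5, 'timescale': None, 'origin': (0, 0)}
```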
......@@ -768,60 +781,90 @@ def filter_by_region(df, xlim=None, ylim=None, zlim=None):
return df_
def get_coordinates(image, interactive=True, verbose=True):
def get_coordinates(image, df=None, verbose=True):
"""
Interactive selection of coordinates on an image by hand-drawing using a Napari viewer.
Supported selections: points and rectangles.
To be used with get_coordinates
:param image: dict returned by get_image()
:type image: dict
:param df: pd.dataframe of tracks
:type df: pandas.DataFrame
:param verbose: verbosity
:type verbose: bool
:return: dict of list of selected shapes: {'points':[coordinates1,...],'rectangle':[coordinates1,...]}
:rtype: dict
"""
image_fn = image['image_fn']
t_dim = image['t_dim']
z_dim = image['z_dim']
no_image = False
if df is None and image is None:
raise Exception("No data nor image")
# if there is no image, plot the tracks in the viewer instead
elif image is None:
no_image = True
else:
if image["image_fn"] is None:
no_image = True
else:
if not osp.exists(image['image_fn']):
print("Warning: {} does not exist".format(image['image_fn']))
if no_image:
df = df.sort_values(by=['track', 'frame']) # napari track layer requires data to be sorted by ID then frame
if 'z' in df.columns:
cols = ['frame', 'z', 'y', 'x']
t_dim = 0
z_dim = 1
else:
cols = ['frame', 'y', 'x']
t_dim = 0
z_dim = None
tracks = df[['track'] + cols].values
im = io.imread(image_fn)
else:
image_fn = image['image_fn']
t_dim = image['t_dim']
z_dim = image['z_dim']
im = io.imread(image_fn)
selecting = True
while selecting:
# create a list to be modified in get_coord so it is not deleted when get_coord ends
shape_list = []
points_list = []
with napari.gui_qt():
viewer = napari.view_image(im)
if verbose:
print("Draw points or rectangles, then press ENTER and close the image viewer")
# retrieve coordinates when Enter is pressed
@viewer.bind_key('Enter')
def get_coord(viewer):
for layer in viewer.layers:
if type(layer) is napari.layers.shapes.shapes.Shapes:
shape_list.append(layer)
if type(layer) is napari.layers.points.points.Points:
points_list.append(layer.data)
# inspect selected layers
rectangle_list = []
if len(shape_list) > 0:
for i, shape_type_ in enumerate(shape_list[0].shape_type):
if shape_type_ == 'rectangle':
rectangle_list.append(shape_list[0].data[i])
points = np.array([])
if len(points_list) > 0:
points = points_list[0]
# interactive validation of selection
if verbose:
print('You have selected {} point(s) and {} rectangle(s)'.format(points.shape[0], len(rectangle_list)))
if interactive:
finished = input('Is the selection correct? [y]/n: ')
if finished != 'n':
selecting = False
else:
selecting = False
# create lists to store the layers' data from the open Napari viewer
shape_list = []
points_list = []
if no_image:
viewer = napari.Viewer()
viewer.add_tracks(tracks, name='trajectories')
else:
viewer = napari.view_image(im)
# retrieve coordinates when Enter is pressed
@viewer.bind_key('Enter')
def get_coord(viewer):
for layer in viewer.layers:
if type(layer) is napari.layers.shapes.shapes.Shapes:
shape_list.append(layer)
if type(layer) is napari.layers.points.points.Points:
points_list.append(layer.data)
viewer.close()
if verbose:
print("Draw points or rectangles, then press ENTER")
napari.run()
# inspect selected layers
rectangle_list = []
if len(shape_list) > 0:
for i, shape_type_ in enumerate(shape_list[0].shape_type): # grab the first element to ignore if Enter is pressed several times
if shape_type_ == 'rectangle':
rectangle_list.append(shape_list[0].data[i])
points = np.array([])
if len(points_list) > 0:
points = points_list[0] # grab the first element to ignore if Enter is pressed several times
if verbose:
print('You have selected {} point(s) and {} rectangle(s)'.format(points.shape[0], len(rectangle_list)))
# retrieve coordinates
coord_dict = {'points': [], 'rectangle': []}
......
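A minimal sketch of the new image-less mode of `get_coordinates`: when no usable image is available, the tracks themselves are displayed so regions can still be drawn. The import path and data paths are assumptions consistent with the rest of this commit:

```python
# Hedged sketch: draw ROIs on the trajectories when no background image exists.
from track_analyzer import prepare as tpr  # assuming get_coordinates/get_data live here

data_dir = 'experiment/position_1'  # made-up path
df, lengthscale, timescale, columns, dim = tpr.get_data(data_dir)
image = {'image_fn': None, 't_dim': None, 'z_dim': None}  # no image available
coord = tpr.get_coordinates(image, df=df)  # draw points/rectangles in Napari, then press ENTER
print(coord['points'], coord['rectangle'])
```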
......@@ -179,7 +179,9 @@ def traj_analysis(data_dir, data=None, image=None, refresh=False, parallelize=Fa
if voronoi_config['run']:
vor_data = tca.compute_all_Voronoi(data_dir, df, outdir=sub_dir,compute_local_area=voronoi_config['compute_local_area'],
area_threshold=voronoi_config['area_threshold'],df_mean=df_prop)
# update csv files with area data
df_prop.to_csv(mean_fn)
df.to_csv(csv_fn)
if voronoi_config['plot']:
if plot_config['save_as_stack']:
......
......@@ -31,18 +31,12 @@ import seaborn as sns
from track_analyzer import prepare as tpr
from track_analyzer import plotting as tpl
# Plotting parameters
color_list = [c['color'] for c in list(plt.rcParams['axes.prop_cycle'])] + sns.color_palette("Set1", n_colors=9,
desat=.5)
plot_param = {'figsize': (5, 5), 'dpi': 300, 'color_list': color_list, 'format': '.png', 'despine': True, 'logx': False,
'logy': False, 'invert_yaxis': True, 'export_data_pts': False}
def compare_datasets(data_dir, df_list=[], track_df_list=[], MSD_df_list=[], datasets_names=[], plotting_mode='compare',
param_couples=None, param_hist=None, param_boxplot=None,
param_track_couples=None, param_track_hist=None, param_track_boxplot=None,
MSD_plot_param={'logplot_x': True, 'logplot_y': True, 'alpha': 0.2},
plot_param=plot_param):
plot_config=None):
"""Compare different datasets with a list of plotting methods: couples of parameters, histograms, boxplot.
Each of these three methods (param_couples,param_hist,param_boxplot) are given as dict containing the list of parameters and some specific arguments.
"""
......@@ -82,7 +76,7 @@ def compare_datasets(data_dir, df_list=[], track_df_list=[], MSD_df_list=[], dat
print("plotting scatter plots")
for param_couple in param_couples['couples']:
tpl.plot_param_vs_param(data_dir, param_couple[0], param_couple[1], df=pooled_all_df, hue=hue,
hue_order=hue_order, set_axis_lim=param_couples['axis_lim'], plot_param=plot_param,
hue_order=hue_order, set_axis_lim=param_couples['axis_lim'], plot_config=plot_config,
plot_dir=plot_dir, suffix='_' + names_str)
if param_track_couples is not None:
......@@ -91,23 +85,23 @@ def compare_datasets(data_dir, df_list=[], track_df_list=[], MSD_df_list=[], dat
for param_couple in param_track_couples['couples']:
tpl.plot_param_vs_param(data_dir, param_couple[0], param_couple[1], df=pooled_track_df, hue=hue,
hue_order=hue_order, set_axis_lim=param_track_couples['axis_lim'],
plot_param=plot_param, plot_dir=plot_dir, suffix='_' + names_str)
plot_config=plot_config, plot_dir=plot_dir, suffix='_' + names_str)
if param_hist is not None:
if len(param_hist['param']) > 0:
print("plotting histograms")
for param in param_hist['param']:
tpl.plot_param_hist(data_dir, param, df=pooled_all_df, hue=hue, hue_order=hue_order,
hist=param_hist['hist'], kde=param_hist['kde'], rug=param_hist['rug'],
plot_param=plot_param, plot_dir=plot_dir, suffix='_' + names_str)
hist=param_hist['hist'], kde=param_hist['kde'],
plot_config=plot_config, plot_dir=plot_dir, suffix='_' + names_str)
if param_track_hist is not None:
if len(param_track_hist['param']) > 0:
print("plotting histograms")
for param in param_track_hist['param']:
tpl.plot_param_hist(data_dir, param, df=pooled_track_df, hue=hue, hue_order=hue_order,
hist=param_track_hist['hist'], kde=param_track_hist['kde'], rug=param_track_hist['rug'],
plot_param=plot_param, plot_dir=plot_dir, suffix='_' + names_str)
hist=param_track_hist['hist'], kde=param_track_hist['kde'],
plot_config=plot_config, plot_dir=plot_dir, suffix='_' + names_str)
if param_boxplot is not None:
if len(param_boxplot['param']) > 0:
......@@ -115,7 +109,7 @@ def compare_datasets(data_dir, df_list=[], track_df_list=[], MSD_df_list=[], dat
for param in param_boxplot['param']:
tpl.plot_param_boxplot(data_dir, df=pooled_all_df, x_param=x_param, param=param, order=order,
save_stat=param_boxplot['save_stat'], boxplot=param_boxplot['boxplot'],
swarmplot=param_boxplot['swarmplot'], plot_param=plot_param, plot_dir=plot_dir,
swarmplot=param_boxplot['swarmplot'], plot_config=plot_config, plot_dir=plot_dir,
suffix='_' + names_str)
if param_track_boxplot is not None:
......@@ -124,7 +118,7 @@ def compare_datasets(data_dir, df_list=[], track_df_list=[], MSD_df_list=[], dat
for param in param_track_boxplot['param']:
tpl.plot_param_boxplot(data_dir, df=pooled_track_df, x_param=x_param, param=param, order=order,
save_stat=param_track_boxplot['save_stat'], boxplot=param_track_boxplot['boxplot'],
swarmplot=param_track_boxplot['swarmplot'], plot_param=plot_param, plot_dir=plot_dir,
swarmplot=param_track_boxplot['swarmplot'], plot_config=plot_config, plot_dir=plot_dir,
suffix='_' + names_str)
if MSD_plot_param is not None:
......@@ -135,25 +129,7 @@ def compare_datasets(data_dir, df_list=[], track_df_list=[], MSD_df_list=[], dat
MSD_plot_param['plot_all_MSD'] = True
MSD_plot_param['plot_single_MSD'] = False
tpl.plot_all_MSD(data_dir, msd_all=pooled_MSD_df, fit_model=None, MSD_parameters=MSD_plot_param, hue=hue,
hue_order=hue_order, plot_param=plot_param, plot_dir=plot_dir)
hue_order=hue_order, plot_config=plot_config, plot_dir=plot_dir)
return pooled_all_df, pooled_track_df
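A hedged sketch of calling `compare_datasets` after the `plot_param` to `plot_config` switch. The dataframes and names below are placeholders; the dict keys follow the ones read inside the function (note that the histogram dicts no longer need a `rug` entry):

```python
# Hedged sketch: compare two conditions with histograms and boxplots.
param_hist = {'param': ['v', 'a'], 'hist': True, 'kde': True}
param_boxplot = {'param': ['v'], 'save_stat': True, 'boxplot': True, 'swarmplot': True}

pooled_all_df, pooled_track_df = compare_datasets(
    'comparison_dir',                        # made-up output directory
    df_list=[df_ctrl, df_drug],              # per-dataset position dataframes (placeholders)
    track_df_list=[track_ctrl, track_drug],  # per-dataset track dataframes (placeholders)
    datasets_names=['control', 'drug'],
    param_hist=param_hist, param_boxplot=param_boxplot,
    plot_config=None)                        # presumably resolved to package defaults when None
```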
def batch_analysis(dirdata, run_='cell_analysis', refresh=False, invert_yaxis=True):
dirdata_l = tpr.listdir_nohidden(dirdata)
dirdata_l_ = []
for d in dirdata_l:
dirdata_ = osp.join(dirdata, d)
if osp.isdir(dirdata_) and d != 'outdata':
dirdata_l_.append(d)
for i, d in enumerate(dirdata_l_):
dirdata_ = osp.join(dirdata, d)
print("processing directory {} ({}/{})".format(d, i + 1, len(dirdata_l_)))
if run_ == 'cell_analysis':
cell_analysis(dirdata_, no_bkg=True, show_axis=True, plot_vel=None, min_traj_len=10, plot_vs_Y=True,
dont_plot_cells=True, refresh=refresh, invert_yaxis=invert_yaxis)
# analysis_func(dirdata_,no_bkg=True,show_axis=True,plot_vel=None,min_traj_len=10,plot_vs_Y=True,dont_plot_cells=True,dont_set_origin=True)
elif run_ == 'pooled_MSD':
df, lengthscale, timescale, columns, dim = tpr.get_data(dirdata_)
plot_pooled_MSD(data_dir, dt=timescale, plot_method='along_Y')
......@@ -61,7 +61,7 @@ def make_diff_traj(part_index=0, grid_size=[500, 500, 500], dim=3, tmax=10, peri
displacement['r'] = np.sqrt(displacement['r2'])
for i in range(dim):
displacement[list('xyz')[i]] /= displacement['r'] # normalize raw displacement
displacement[list('xyz')[i]] *= noise_amp # amply amplitude
displacement[list('xyz')[i]] *= noise_amp # apply amplitude
displacement[list('xyz')[i]] += bias[i] # add bias
displacement = displacement[list('xyz')[0:dim]].values
......@@ -78,9 +78,11 @@ def make_diff_traj(part_index=0, grid_size=[500, 500, 500], dim=3, tmax=10, peri
def make_spatial_gradient(part_num=100, grid_size=[500, 500, 500], dim=3, tmax=10, periodic=True,
bias_basis=[0, 0, 0],
diff_grad={'min': 0, 'max': 10}, bias_grad={'min': 0, 'max': 10, 'dim': 0},
grad={'step_num': 4, 'dim': 0},
grad={'step_num': 4, 'dim': 0, 'boundaries':None},
x0_range={'x': [0.1, 0.9], 'y': [0.1, 0.9], 'z': [0.1, 0.9]}, dt=1):
"""Make a spatial gradient (number of steps on the gradient given by grad['step_num'})in diffusion or bias, along a specific dimension, given by grad['dim'].
"""
Make a spatial gradient in diffusion or bias, along a specific dimension, given by grad['dim'].
The number of steps on the gradient is given by grad['step_num'].
The gradient can be applied to the diffusion (diff_grad) or to the bias (bias_grad): 'min' and 'max' give the extrema of the gradient, and bias_grad['dim'] gives the dimension along which the bias is applied.
An overall constant bias can be passed with bias_basis.
"""
......@@ -90,11 +92,13 @@ def make_spatial_gradient(part_num=100, grid_size=[500, 500, 500], dim=3, tmax=1
diff_grad_ = np.linspace(diff_grad['min'], diff_grad['max'], grad['step_num'])
bias_grad_ = np.linspace(bias_grad['min'], bias_grad['max'], grad['step_num'])
# spatial boundaries of the regions of particles
lims = [[x0_range['x'][0] * grid_size[0], x0_range['x'][1] * grid_size[0]],
[x0_range['y'][0] * grid_size[1], x0_range['y'][1] * grid_size[1]],
[x0_range['z'][0] * grid_size[2], x0_range['z'][1] * grid_size[2]]]
part_count = 0
for i in range(grad['step_num']):
grad_increment = (lims[grad['dim']][1] - lims[grad['dim']][0]) / grad['step_num']
......
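A hedged sketch of generating synthetic data with the updated `make_spatial_gradient` signature (the new `boundaries` key defaults to `None`, i.e. evenly spaced steps as computed above). The call below only illustrates plausible arguments; the return value is not shown in this diff:

```python
# Hedged sketch: four-step bias gradient along x (grad['dim'] = 0), uniform diffusion.
synthetic = make_spatial_gradient(part_num=200, grid_size=[500, 500, 500], dim=3, tmax=20,
                                  bias_basis=[0, 0, 0],
                                  diff_grad={'min': 1, 'max': 1},            # no gradient in diffusion
                                  bias_grad={'min': 0, 'max': 5, 'dim': 0},  # bias strength grows along x
                                  grad={'step_num': 4, 'dim': 0, 'boundaries': None},
                                  dt=1)
```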