diff --git a/Dockerfile b/Dockerfile
index ba294b0129795225133622877e372c62f23ece0d..79ad847b19dd5fce9722047f029a3aa05b70e858 100755
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,11 +1,9 @@
-FROM python:latest
-WORKDIR /root
+FROM jupyter/datascience-notebook:latest
+WORKDIR /home/jovyan
+USER root
 COPY . .
-RUN apt-get update
-RUN apt-get install 'ffmpeg'\
-    'libsm6'\ 
-    'libxext6'  -y
 RUN python -V &&\
-    python -m pip install -r requirements.txt &&\
-    python setup.py install
-CMD python -V && jupyter lab
\ No newline at end of file
+    # python -m pip install -r requirements.txt &&\
+    pip install -e .
+USER jovyan
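+# Hedged usage sketch (the image tag and published port below are assumptions,
+# not defined anywhere in this repo):
+#   docker build -t droplet-growth .
+#   docker run -p 8888:8888 droplet-growth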
+CMD python -V && jupyter notebook
\ No newline at end of file
diff --git a/droplet_growth/register.py b/droplet_growth/register.py
index 3c0d8ce60bf48c02b6d4a07eb54606bf81c33c00..fee6bbfbb6181afcdfc3fc0a97d7818cfc4c1d93 100644
--- a/droplet_growth/register.py
+++ b/droplet_growth/register.py
@@ -13,6 +13,7 @@ def align_stack(path, template16, mask2, plot=False, binnings=(1,16,2)):
     stack should contain two channels: bright field and fluorescence.
     BF will be binned 8 times and registered with template8 (aligned BF).
     When the transformation verctor will be applied to the original data and stacked with the mask.
+    The output stack is of the same size as mask.
     The resulting 3-layer stack will be returned and also saved with suffix ".aligned.tif"
     '''
 
@@ -58,6 +59,107 @@ def align_stack(path, template16, mask2, plot=False, binnings=(1,16,2)):
     return aligned_stack
 
 
+def align_timelapse(bf, fluo_stack, template16, mask2, plot=False, binnings=(4,16,2)):
+    '''
+    bf is a single bright-field frame, fluo_stack the fluorescence time-lapse.
+    BF is binned down to the template binning and registered with template16 (aligned BF).
+    The transformation vector is then scaled and applied to the original BF and to every
+    fluorescence frame, while the mask is binned down to the BF scale.
+    Returns (aligned_bf, aligned_fluo, mask) with the aligned images cropped to the mask shape.
+    '''
+
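+    # binnings appears to hold the (bf, template, mask) binning factors, e.g. (4, 16, 2);
+    # the ratios below convert the registration vector between these scales.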
+    stack_temp_scale = binnings[1] // binnings[0]
+    mask_temp_scale = binnings[1] // binnings[2]
+    mask_bf_scale = binnings[0] // binnings[2]
+    
+    f_bf = filter_by_fft(
+        bf[::stack_temp_scale, ::stack_temp_scale], 
+        sigma=40,
+        fix_horizontal_stripes=True, 
+        fix_vertical_stripes=True,
+        highpass=True
+    )
+    tvec8 = get_transform(f_bf, template16, plot=plot)
+    plt.show()
+    tvec = scale_tvec(tvec8, stack_temp_scale)
+    print(tvec)
+
+    mask = mask2[::mask_bf_scale, ::mask_bf_scale]
+    aligned_bf = unpad(transform(bf, tvec), mask.shape)
+    
+    if plot:
+        plt.figure(dpi=300)
+        plt.imshow(bf, cmap='gray')
+        plt.colorbar()
+        plt.title('Original image')
+        plt.show()
+
+        plt.figure(dpi=300)
+        plt.imshow(mic.segment.label2rgb(mask, to_8bits(aligned_bf), bg_label=0))
+        plt.title('Aligned mask over aligned image')
+        plt.show()
+
+    aligned_fluo = [unpad(transform(frame, tvec), mask.shape) for frame in fluo_stack]
+
+    return (aligned_bf, aligned_fluo, mask)
+
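+# Hedged usage sketch for align_timelapse (file names and the pre-binned template16 /
+# mask2 arrays are illustrative assumptions, not fixtures of this repo):
+#
+#   bf = imread('pos1_bf.tif')       # single bright-field frame
+#   fluo = imread('pos1_fluo.tif')   # fluorescence time-lapse stack
+#   aligned_bf, aligned_fluo, mask = align_timelapse(
+#       bf, fluo, template16, mask2, plot=False, binnings=(4, 16, 2))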
+
+def align_mask_to_bf(bf_path, template16, mask2, plot=False, binnings=(4,16,2)):
+    '''
+    bf_path should point to a tif with the bright-field image.
+    BF will be binned and used as the reference to which template16 is aligned.
+    The transformation vector is then applied to the mask, which is returned
+    resized to the size of the BF input.
+    The aligned mask is also saved with the ".tif" suffix replaced by ".mask.aligned.tif".
+    '''
+
+    bf = imread(bf_path)
+    print(bf_path, bf.shape)
+
+    bf_temp_scale = binnings[1] // binnings[0]
+    mask_temp_scale = binnings[1] // binnings[2]
+    mask_bf_scale = binnings[0] // binnings[2]
+    
+    f_bf = filter_by_fft(
+        bf[::bf_temp_scale, ::bf_temp_scale], 
+        sigma=40,
+        fix_horizontal_stripes=True, 
+        fix_vertical_stripes=True,
+        highpass=True
+    )
+    tvec8 = get_transform(pad(template16, f_bf.shape), f_bf, plot=plot)
+    plt.show()
+    tvec = scale_tvec(tvec8, mask_temp_scale)
+    print(tvec)
+
+    padded_mask = pad(mask2[::mask_bf_scale, ::mask_bf_scale], bf.shape)
+    aligned_mask = unpad(transform(padded_mask, tvec), bf.shape)
+    
+    if plot:
+        plt.figure(dpi=300)
+        plt.imshow(bf, cmap='gray')
+        plt.colorbar()
+        plt.title('Original image')
+        plt.show()
+
+        plt.figure(dpi=300)
+        plt.imshow(tvec8["timg"], cmap='gray')
+        plt.colorbar()
+        plt.title('Aligned template')
+        plt.show()
+
+        plt.figure(dpi=300)
+        plt.imshow(mic.segment.label2rgb(aligned_mask, to_8bits(bf), bg_label=0))
+        plt.title('Aligned mask over original image')
+        plt.show()
+
+    save_path = bf_path.replace('.tif', '.mask.aligned.tif')
+    imwrite(save_path, aligned_mask)
+    print(f"Saved {save_path}")
+    
+    return aligned_mask
+
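+# Hedged usage sketch for align_mask_to_bf (the path is an illustrative assumption):
+#
+#   aligned_mask = align_mask_to_bf('pos1_bf.tif', template16, mask2,
+#                                   plot=True, binnings=(4, 16, 2))
+#   # the aligned mask is also written to 'pos1_bf.mask.aligned.tif'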
+
 def get_transform(image, template, plot=True, pad_ratio=1.2, figsize=(10,5), dpi=300):
     '''
     Pads image and template, registers and returns tvec
@@ -176,6 +278,7 @@ def scale_tvec(tvec, scale=8):
     
     
 def transform(image, tvec):
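+    # debug output: report the frame shape fed to the registration transform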
+    print(f'input {image.shape}')
     fluo = reg.transform_img_dict(image, tvec)
     return fluo.astype('uint')