diff --git a/scripts/reinstall.sh b/scripts/reinstall.sh
index 3bcad5c72c458c75ec47a100ed7f8c74b3f3ac6d..a4442e65d00068be883f615345a943fdc29fd3d6 100755
--- a/scripts/reinstall.sh
+++ b/scripts/reinstall.sh
@@ -8,5 +8,12 @@ poetry env use python3.8
 rm -f poetry.lock
 poetry install -vvv
 
+if [ "$(git branch --show-current)" = "design" ]; then
+# register the venv with Jupyter
+envname=$(basename "$(poetry env info -p)")
+poetry run python -m ipykernel install --user --name "$envname"
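+# patch kernel.json with an "env" entry that sets JULIA_PROJECT to the TaggingBackends directory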
+sed -i "s|],|],\n \"env\": {\n  \"JULIA_PROJECT\": \"$(realpath ../TaggingBackends)\"\n },|" ~/.local/share/jupyter/kernels/${envname,,}/kernel.json
+fi
+
 # if pyproject.toml was not updated in the first place:
-#poetry install ../TaggingBackends -vvv
+#poetry add ../TaggingBackends -vvv
diff --git a/src/maggotuba/models/modules.py b/src/maggotuba/models/modules.py
index a0b03fb1f5ededeeb18a750795bf53945e95e7f3..fbb3b844b6bd4212872ff016343acfded091ed74 100644
--- a/src/maggotuba/models/modules.py
+++ b/src/maggotuba/models/modules.py
@@ -152,7 +152,7 @@ class MaggotEncoder(MaggotModule):
         # if state file not found or config option "load_state" is False,
         # (re-)initialize the model's weights
         if _reason:
-            logging.info(f"initializing the encoder ({_reason})")
+            logging.debug(f"initializing the encoder ({_reason})")
             _init, _bias = config.get('init', None), config.get('bias', None)
             for child in encoder.children():
                 if isinstance(child,
@@ -222,6 +222,9 @@ class PretrainedMaggotEncoder(MaggotEncoder):
 
     def save(self, ptfile="retrained_encoder.pt"):
         self.ptfile = ptfile
+        # "load_state" was introduced in json config file as a mechanism to load
+        # untrained encoders; once trained, this key must be removed:
+        self.config.pop('load_state', None)
         return super().save()
 
 class MaggotEncoders(nn.Module):
diff --git a/src/maggotuba/models/train_model.py b/src/maggotuba/models/train_model.py
index 768e82896f2b6d8e30b27010c9328e89a1a7a62d..516bc0cf1d67218631ed7cf2b18cce105cf7b51d 100644
--- a/src/maggotuba/models/train_model.py
+++ b/src/maggotuba/models/train_model.py
@@ -4,15 +4,16 @@ from taggingbackends.explorer import check_permissions
 from maggotuba.models.trainers import make_trainer, new_generator
 import json
 import glob
-import logging
 
-def train_model(backend, layers=1, pretrained_model_instance="default", subsets=(1, 0, 0), **kwargs):
+def train_model(backend, layers=1, pretrained_model_instance="default",
+                subsets=(1, 0, 0), rng_seed=None, **kwargs):
     # make_dataset generated or moved the larva_dataset file into data/interim/{instance}/
     #larva_dataset_file = backend.list_interim_files("larva_dataset_*.hdf5") # recursive
     larva_dataset_file = glob.glob(str(backend.interim_data_dir() / "larva_dataset_*.hdf5")) # not recursive (faster)
     assert len(larva_dataset_file) == 1
     # subsets=(1, 0, 0) => all data are training data; no validation or test subsets
-    dataset = LarvaDataset(larva_dataset_file[0], new_generator(), subsets=subsets, **kwargs)
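+    # rng_seed=None keeps new_generator's fixed default seed; 'random' yields an unseeded generator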
+    dataset = LarvaDataset(larva_dataset_file[0], new_generator(rng_seed),
+                           subsets=subsets, **kwargs)
     labels = dataset.labels
     assert 0 < len(labels)
     labels = labels if isinstance(labels[0], str) else [s.decode() for s in labels]
@@ -26,7 +27,7 @@ def train_model(backend, layers=1, pretrained_model_instance="default", subsets=
         model = make_trainer(config_files, labels, layers)
     # fine-tune and save the model
     model.train(dataset)
-    logging.info(f"saving model \"{backend.model_instance}\"")
+    print(f"saving model \"{backend.model_instance}\"")
     model.save()
 
 # TODO: merge the below two functions
diff --git a/src/maggotuba/models/trainers.py b/src/maggotuba/models/trainers.py
index ede624423ee157a50046c2052e82088ff57e9582..6bdc4b852a9e0441c2114652efe3e86bac8d5f33 100644
--- a/src/maggotuba/models/trainers.py
+++ b/src/maggotuba/models/trainers.py
@@ -1,4 +1,3 @@
-import logging
 import numpy as np
 import torch
 import torch.nn as nn
@@ -149,11 +148,12 @@ class MaggotTrainer:
         model.to(self.device)
         criterion = nn.CrossEntropyLoss()
         nsteps = self.config['optim_iter']
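+        # maximum gradient norm for the clip_grad_norm_ calls below, taken from the config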
+        grad_clip = self.config['grad_clip']
         # pre-train the classifier with static encoder weights
         if model.encoder.was_pretrained():
             nsteps = nsteps // 2
             optimizer = torch.optim.Adam(model.clf.parameters())
-            logging.info("pre-training the classifier...")
+            print("pre-training the classifier...")
             for step in range(nsteps):
                 optimizer.zero_grad()
                 # TODO: add an option for renormalizing the input
@@ -161,18 +161,19 @@ class MaggotTrainer:
                 predicted = self.forward(data, train=True)
                 loss = criterion(predicted, expected)
                 loss.backward()
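+                # clip classifier gradients before the optimizer step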
+                nn.utils.clip_grad_norm_(model.clf.parameters(), grad_clip)
                 optimizer.step()
         # fine-tune both the encoder and the classifier
         optimizer = torch.optim.Adam(model.parameters())
-        logging.info(
-                ("fine-tuning" if model.encoder.was_pretrained() else "training") + \
-                 " the encoder and classifier...")
+        print(("fine-tuning" if model.encoder.was_pretrained() else "training") + \
+               " the encoder and classifier...")
         for step in range(nsteps):
             optimizer.zero_grad()
             data, expected = self.draw(dataset)
             predicted = self.forward(data, train=True)
             loss = criterion(predicted, expected)
             loss.backward()
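+            # clip gradients of the full model (encoder and classifier)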
+            nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
             optimizer.step()
         #
         return self
@@ -218,8 +219,11 @@ class MaggotTrainer:
     def save(self):
         self.model.save()
 
-def new_generator(seed=0b11010111001001101001110):
-    return torch.Generator(device).manual_seed(seed)
+def new_generator(seed=None):
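+    # seed=None     -> use the fixed default seed (reproducible)
+    # seed='random' -> return an unseeded generator (non-deterministic)
+    # seed=<int>    -> seed the generator with the given value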
+    generator = torch.Generator(device)
+    if seed == 'random': return generator
+    if seed is None: seed = 0b11010111001001101001110
+    return generator.manual_seed(seed)
 
 
 class MultiscaleMaggotTrainer(MaggotTrainer):