diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6aa5c9d764cc98c3db9a091bd55cce7ff3ee885d..34a47a0a1d383201a132713a973a23a56214c74e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@ Note: Breaking changes between versions are indicated by "💥".
 ## Unreleased
 
 - [Improvement] Fix tls certificate generation in k8s
+- [Improvement] Radically change the way jobs are run: we no longer "exec", but instead run a dedicated container.
 - [Improvement] Upgrade k8s certificate issuer to cert-manager.io/v1alpha2
 - [Feature] Add SCORM XBlock to default openedx docker image
 
diff --git a/docs/plugins/api.rst b/docs/plugins/api.rst
index 048a92ebbd865be57c4509c3aab5748548d27a6c..f89877696462dda850e1c53ea587e03be40c173b 100644
--- a/docs/plugins/api.rst
+++ b/docs/plugins/api.rst
@@ -83,6 +83,10 @@ Example::
     
 During initialisation, "myservice1" and "myservice2" will be run in sequence with the commands defined in the templates ``myplugin/hooks/myservice1/init`` and ``myplugin/hooks/myservice2/init``.
 
+To initialise a "foo" service, Tutor runs the "foo-job" service that is found in the ``env/local/docker-compose.jobs.yml`` file. By default, Tutor comes with a few services in this file: mysql-job, lms-job, cms-job, forum-job. If your plugin requires running custom services during initialisation, you will need to add them to the ``docker-compose.jobs.yml`` template. To do so, just use the "local-docker-compose-jobs-services" patch.
+
+In Kubernetes, the approach is the same, except that jobs are implemented as actual job objects in the ``k8s/jobs.yml`` template. To add your own services there, your plugin should implement the "k8s-jobs" patch.
+
 ``pre-init``
 ++++++++++++
 
diff --git a/requirements/base.in b/requirements/base.in
index 8b547a6b937a08326ed92e56829202d83f1ccce4..3e7ed8d347b5f857f53e5065531fee72bb97819f 100644
--- a/requirements/base.in
+++ b/requirements/base.in
@@ -2,4 +2,5 @@ appdirs
 click>=7.0
 click_repl
 jinja2>=2.9
+kubernetes
 pyyaml>=4.2b1
diff --git a/requirements/base.txt b/requirements/base.txt
index 33a575846c1e6e105d879a380f269dbfa36b1d81..59162bb70a064f520f9e87f7ca62bd8de0aa1fd6 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -5,11 +5,29 @@
 #    pip-compile requirements/base.in
 #
 appdirs==1.4.3
+cachetools==4.1.0         # via google-auth
+certifi==2020.4.5.1       # via kubernetes, requests
+chardet==3.0.4            # via requests
 click-repl==0.1.6
 click==7.1.1
+google-auth==1.14.0       # via kubernetes
+idna==2.9                 # via requests
 jinja2==2.11.1
+kubernetes==11.0.0
 markupsafe==1.1.1         # via jinja2
+oauthlib==3.1.0           # via requests-oauthlib
 prompt-toolkit==3.0.5     # via click-repl
+pyasn1-modules==0.2.8     # via google-auth
+pyasn1==0.4.8             # via pyasn1-modules, rsa
+python-dateutil==2.8.1    # via kubernetes
 pyyaml==5.3.1
-six==1.14.0               # via click-repl
+requests-oauthlib==1.3.0  # via kubernetes
+requests==2.23.0          # via kubernetes, requests-oauthlib
+rsa==4.0                  # via google-auth
+six==1.14.0               # via click-repl, google-auth, kubernetes, python-dateutil, websocket-client
+urllib3==1.25.9           # via kubernetes, requests
 wcwidth==0.1.9            # via prompt-toolkit
+websocket-client==0.57.0  # via kubernetes
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 87a44f1273fd04c70d2a65e9c500decdbac53fec..a803e80ef6401d264b46b38f04777285836bb76f 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -10,44 +10,54 @@ astroid==2.3.3            # via pylint
 attrs==19.3.0             # via black
 black==19.10b0
 bleach==3.1.4             # via readme-renderer
-certifi==2019.11.28       # via requests
+cachetools==4.1.0
+certifi==2020.4.5.1
 cffi==1.14.0              # via cryptography
-chardet==3.0.4            # via requests
+chardet==3.0.4
 click-repl==0.1.6
 click==7.1.1
 cryptography==2.8         # via secretstorage
 docutils==0.16            # via readme-renderer
-idna==2.9                 # via requests
+google-auth==1.14.0
+idna==2.9
 importlib-metadata==1.6.0  # via keyring, twine
 isort==4.3.21             # via pylint
 jeepney==0.4.3            # via keyring, secretstorage
 jinja2==2.11.1
 keyring==21.2.0           # via twine
+kubernetes==11.0.0
 lazy-object-proxy==1.4.3  # via astroid
 markupsafe==1.1.1
 mccabe==0.6.1             # via pylint
+oauthlib==3.1.0
 pathspec==0.7.0           # via black
 pip-tools==4.5.1
 pkginfo==1.5.0.1          # via twine
 prompt-toolkit==3.0.5
+pyasn1-modules==0.2.8
+pyasn1==0.4.8
 pycparser==2.20           # via cffi
 pygments==2.6.1           # via readme-renderer
 pyinstaller==3.6
 pylint==2.4.4
+python-dateutil==2.8.1
 pyyaml==5.3.1
 readme-renderer==25.0     # via twine
 regex==2020.2.20          # via black
+requests-oauthlib==1.3.0
 requests-toolbelt==0.9.1  # via twine
-requests==2.23.0          # via requests-toolbelt, twine
+requests==2.23.0
+rsa==4.0
 secretstorage==3.1.2      # via keyring
 six==1.14.0
 toml==0.10.0              # via black
 tqdm==4.44.1              # via twine
 twine==3.1.1
 typed-ast==1.4.1          # via astroid, black
-urllib3==1.25.8           # via requests
+urllib3==1.25.9
 wcwidth==0.1.9
 webencodings==0.5.1       # via bleach
+websocket-client==0.57.0
 wrapt==1.11.2             # via astroid
 zipp==3.1.0               # via importlib-metadata
 
diff --git a/requirements/docs.txt b/requirements/docs.txt
index bb7cd33890a6aea9d3eb10a7456eb8a7a4a24421..b91176bfaafa48c0f06e66e7f45a73bf2f11796f 100644
--- a/requirements/docs.txt
+++ b/requirements/docs.txt
@@ -7,22 +7,31 @@
 alabaster==0.7.12         # via sphinx
 appdirs==1.4.3
 babel==2.8.0              # via sphinx
-certifi==2019.11.28       # via requests
-chardet==3.0.4            # via requests
+cachetools==4.1.0
+certifi==2020.4.5.1
+chardet==3.0.4
 click-repl==0.1.6
 click==7.1.1
 docutils==0.16            # via sphinx
-idna==2.9                 # via requests
+google-auth==1.14.0
+idna==2.9
 imagesize==1.2.0          # via sphinx
 jinja2==2.11.1
+kubernetes==11.0.0
 markupsafe==1.1.1
+oauthlib==3.1.0
 packaging==20.3           # via sphinx
 prompt-toolkit==3.0.5
+pyasn1-modules==0.2.8
+pyasn1==0.4.8
 pygments==2.6.1           # via sphinx
 pyparsing==2.4.6          # via packaging
+python-dateutil==2.8.1
 pytz==2019.3              # via babel
 pyyaml==5.3.1
-requests==2.23.0          # via sphinx
+requests-oauthlib==1.3.0
+requests==2.23.0
+rsa==4.0
 six==1.14.0
 snowballstemmer==2.0.0    # via sphinx
 sphinx-rtd-theme==0.4.3
@@ -33,8 +42,9 @@ sphinxcontrib-htmlhelp==1.0.3  # via sphinx
 sphinxcontrib-jsmath==1.0.1  # via sphinx
 sphinxcontrib-qthelp==1.0.3  # via sphinx
 sphinxcontrib-serializinghtml==1.1.4  # via sphinx
-urllib3==1.25.8           # via requests
+urllib3==1.25.9
 wcwidth==0.1.9
+websocket-client==0.57.0
 
 # The following packages are considered to be unsafe in a requirements file:
 # setuptools
diff --git a/tests/test_config.py b/tests/test_config.py
index 105b2b797b3747e0fb687efea742ef36c58c9a8b..e73a671301a0e60696bbf2b7ba3c3b0b5573585a 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -72,3 +72,9 @@ class ConfigTests(unittest.TestCase):
         self.assertNotIn("LMS_HOST", config)
         self.assertEqual("www.myopenedx.com", defaults["LMS_HOST"])
         self.assertEqual("studio.{{ LMS_HOST }}", defaults["CMS_HOST"])
+
+    def test_is_service_activated(self):
+        config = {"ACTIVATE_SERVICE1": True, "ACTIVATE_SERVICE2": False}
+
+        self.assertTrue(tutor_config.is_service_activated(config, "service1"))
+        self.assertFalse(tutor_config.is_service_activated(config, "service2"))
diff --git a/tests/test_scripts.py b/tests/test_scripts.py
deleted file mode 100644
index dfaf4aeb42873dc6c34330632109860e8350339c..0000000000000000000000000000000000000000
--- a/tests/test_scripts.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import unittest
-import unittest.mock
-
-from tutor import config as tutor_config
-from tutor import scripts
-
-
-class ScriptsTests(unittest.TestCase):
-    def test_is_activated(self):
-        config = {"ACTIVATE_SERVICE1": True, "ACTIVATE_SERVICE2": False}
-        runner = scripts.BaseRunner("/tmp", config)
-
-        self.assertTrue(runner.is_activated("service1"))
-        self.assertFalse(runner.is_activated("service2"))
diff --git a/tutor.spec b/tutor.spec
index 68f6efaf6c629548671da7f5309dde3eae2e88f4..cd362c009511d200c1eabad67be3873582da1258 100644
--- a/tutor.spec
+++ b/tutor.spec
@@ -27,6 +27,7 @@ hidden_imports.append("Crypto.Hash.SHA256")
 hidden_imports.append("Crypto.PublicKey.RSA")
 hidden_imports.append("Crypto.Random")
 hidden_imports.append("Crypto.Signature.PKCS1_v1_5")
+hidden_imports.append("kubernetes")
 hidden_imports.append("uuid")
 
 
diff --git a/tutor/commands/compose.py b/tutor/commands/compose.py
index b9602fc2fd9a84ca1438c6d15893ac0e007d061e..8412a1da9f02d4e35cf688703b25bda4b467e07a 100644
--- a/tutor/commands/compose.py
+++ b/tutor/commands/compose.py
@@ -1,8 +1,10 @@
 import click
 
 from .. import config as tutor_config
+from .. import env as tutor_env
 from .. import fmt
 from .. import scripts
+from .. import serialize
 from .. import utils
 
 
@@ -11,11 +13,62 @@ class ScriptRunner(scripts.BaseRunner):
         super().__init__(root, config)
         self.docker_compose_func = docker_compose_func
 
-    def exec(self, service, command):
+    def run_job(self, service, command):
+        """
+        Run the "{{ service }}-job" service from local/docker-compose.jobs.yml with the
+        specified command. For backward-compatibility reasons, if the corresponding
+        service does not exist, run the service from good old regular
+        docker-compose.yml.
+        """
+        jobs_path = tutor_env.pathjoin(self.root, "local", "docker-compose.jobs.yml")
+        job_service_name = "{}-job".format(service)
         opts = [] if utils.is_a_tty() else ["-T"]
-        self.docker_compose_func(
-            self.root, self.config, "exec", *opts, service, "sh", "-e", "-c", command
-        )
+        if job_service_name in serialize.load(open(jobs_path).read())["services"]:
+            self.docker_compose_func(
+                self.root,
+                self.config,
+                "-f",
+                jobs_path,
+                "run",
+                *opts,
+                "--rm",
+                job_service_name,
+                "sh",
+                "-e",
+                "-c",
+                command,
+            )
+        else:
+            fmt.echo_alert(
+                (
+                    "The '{job_service_name}' service does not exist in {jobs_path}. "
+                    "This might be caused by an older plugin. Tutor switched to a job "
+                    "runner model for running one-time commands, such as database"
+                    " initialisation. For the record, this is the command that we are "
+                    "running:\n"
+                    "\n"
+                    "    {command}\n"
+                    "\n"
+                    "Old-style job running will be deprecated soon. Please inform "
+                    "your plugin maintainer!"
+                ).format(
+                    job_service_name=job_service_name,
+                    jobs_path=jobs_path,
+                    command=command.replace("\n", "\n    "),
+                )
+            )
+            self.docker_compose_func(
+                self.root,
+                self.config,
+                "run",
+                *opts,
+                "--rm",
+                service,
+                "sh",
+                "-e",
+                "-c",
+                command,
+            )
 
 
 @click.command(help="Update docker images")
@@ -73,7 +126,7 @@ def restart(context, services):
         pass
     else:
         for service in services:
-            if "openedx" == service:
+            if service == "openedx":
                 if config["ACTIVATE_LMS"]:
                     command += ["lms", "lms-worker"]
                 if config["ACTIVATE_CMS"]:
@@ -138,7 +191,7 @@ def run_hook(context, service, path):
     fmt.echo_info(
         "Running '{}' hook in '{}' container...".format(".".join(path), service)
     )
-    runner.run(service, *path)
+    runner.run_job_from_template(service, *path)
 
 
 @click.command(help="View output from containers")
@@ -171,11 +224,10 @@ def logs(context, follow, tail, service):
 def createuser(context, superuser, staff, password, name, email):
     config = tutor_config.load(context.root)
     runner = ScriptRunner(context.root, config, context.docker_compose)
-    runner.check_service_is_activated("lms")
     command = scripts.create_user_command(
         superuser, staff, name, email, password=password
     )
-    runner.exec("lms", command)
+    runner.run_job("lms", command)
 
 
 @click.command(
diff --git a/tutor/commands/dev.py b/tutor/commands/dev.py
index f030630500a9d9a12a3fe60322da0b66240d4ce0..b2fd6d5cb0ce4400bff8a153b5315ac12ad51e44 100644
--- a/tutor/commands/dev.py
+++ b/tutor/commands/dev.py
@@ -12,20 +12,19 @@ from .. import utils
 class DevContext(Context):
     @staticmethod
     def docker_compose(root, config, *command):
-        args = [
-            "-f",
-            tutor_env.pathjoin(root, "local", "docker-compose.yml"),
-        ]
-        override_path = tutor_env.pathjoin(root, "local", "docker-compose.override.yml")
-        if os.path.exists(override_path):
-            args += ["-f", override_path]
-        args += [
-            "-f",
-            tutor_env.pathjoin(root, "dev", "docker-compose.yml"),
-        ]
-        override_path = tutor_env.pathjoin(root, "dev", "docker-compose.override.yml")
-        if os.path.exists(override_path):
-            args += ["-f", override_path]
+        args = []
+        for folder in ["local", "dev"]:
+            # Add docker-compose.yml and docker-compose.override.yml (if it exists)
+            # from "local" and "dev" folders
+            args += [
+                "-f",
+                tutor_env.pathjoin(root, folder, "docker-compose.yml"),
+            ]
+            override_path = tutor_env.pathjoin(
+                root, folder, "docker-compose.override.yml"
+            )
+            if os.path.exists(override_path):
+                args += ["-f", override_path]
         return utils.docker_compose(
             *args, "--project-name", config["DEV_PROJECT_NAME"], *command,
         )
diff --git a/tutor/commands/k8s.py b/tutor/commands/k8s.py
index bffa58deec9851e3e0423eed72b5b4021014cf56..b837d1954bf3bb307eeba7ba3ae9bbd140f798a1 100644
--- a/tutor/commands/k8s.py
+++ b/tutor/commands/k8s.py
@@ -1,10 +1,15 @@
+from datetime import datetime
+from time import sleep
+
 import click
 
 from .. import config as tutor_config
 from .. import env as tutor_env
+from .. import exceptions
 from .. import fmt
 from .. import interactive as interactive_config
 from .. import scripts
+from .. import serialize
 from .. import utils
 
 
@@ -47,6 +52,7 @@ def start(context):
         "app.kubernetes.io/component=namespace",
     )
     # Create volumes
+    # TODO: instead, we should use StatefulSets
     utils.kubectl(
         "apply",
         "--kustomize",
@@ -55,8 +61,14 @@ def start(context):
         "--selector",
         "app.kubernetes.io/component=volume",
     )
-    # Create everything else
-    utils.kubectl("apply", "--kustomize", tutor_env.pathjoin(context.root))
+    # Create everything else except jobs
+    utils.kubectl(
+        "apply",
+        "--kustomize",
+        tutor_env.pathjoin(context.root),
+        "--selector",
+        "app.kubernetes.io/component!=job",
+    )
 
 
 @click.command(help="Stop a running platform")
@@ -64,7 +76,9 @@ def start(context):
 def stop(context):
     config = tutor_config.load(context.root)
     utils.kubectl(
-        "delete", *resource_selector(config), "deployments,services,ingress,configmaps"
+        "delete",
+        *resource_selector(config),
+        "deployments,services,ingress,configmaps,jobs",
     )
 
 
@@ -108,7 +122,7 @@ def init(context):
     config = tutor_config.load(context.root)
     runner = K8sScriptRunner(context.root, config)
     for service in ["mysql", "elasticsearch", "mongodb"]:
-        if runner.is_activated(service):
+        if tutor_config.is_service_activated(config, service):
             wait_for_pod_ready(config, service)
     scripts.initialise(runner)
 
@@ -126,8 +140,6 @@ def init(context):
 @click.pass_obj
 def createuser(context, superuser, staff, password, name, email):
     config = tutor_config.load(context.root)
-    runner = K8sScriptRunner(context.root, config)
-    runner.check_service_is_activated("lms")
     command = scripts.create_user_command(
         superuser, staff, name, email, password=password
     )
@@ -189,24 +201,161 @@ def logs(context, container, follow, tail, service):
     utils.kubectl(*command)
 
 
+class K8sClients:
+    _instance = None
+
+    def __init__(self):
+        # Loading the kubernetes module here to avoid import overhead
+        from kubernetes import client, config  # pylint: disable=import-outside-toplevel
+
+        config.load_kube_config()
+        self._batch_api = None
+        self._core_api = None
+        self._client = client
+
+    @classmethod
+    def instance(cls):
+        if cls._instance is None:
+            cls._instance = cls()
+        return cls._instance
+
+    @property
+    def batch_api(self):
+        if self._batch_api is None:
+            self._batch_api = self._client.BatchV1Api()
+        return self._batch_api
+
+    @property
+    def core_api(self):
+        if self._core_api is None:
+            self._core_api = self._client.CoreV1Api()
+        return self._core_api
+
+
 class K8sScriptRunner(scripts.BaseRunner):
-    def exec(self, service, command):
-        kubectl_exec(self.config, service, command, attach=False)
+    def load_job(self, name):
+        jobs = self.render("k8s", "jobs.yml")
+        for job in serialize.load_all(jobs):
+            if job["metadata"]["name"] == name:
+                return job
+        raise ValueError("Could not find job '{}'".format(name))
+
+    def active_job_names(self):
+        """
+        Return a list of active job names
+        Docs:
+        https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#list-job-v1-batch
+        """
+        api = K8sClients.instance().batch_api
+        return [
+            job.metadata.name
+            for job in api.list_namespaced_job(self.config["K8S_NAMESPACE"]).items
+            if job.status.active
+        ]
+
+    def run_job(self, service, command):
+        job_name = "{}-job".format(service)
+        try:
+            job = self.load_job(job_name)
+        except ValueError:
+            message = (
+                "The '{job_name}' kubernetes job does not exist in the list of job "
+                "runners. This might be caused by an older plugin. Tutor switched to a"
+                " job runner model for running one-time commands, such as database"
+                " initialisation. For the record, this is the command that we are "
+                "running:\n"
+                "\n"
+                "    {command}\n"
+                "\n"
+                "Old-style job running will be deprecated soon. Please inform "
+                "your plugin maintainer!"
+            ).format(
+                job_name=job_name,
+                command=command.replace("\n", "\n    "),
+            )
+            fmt.echo_alert(message)
+            wait_for_pod_ready(self.config, service)
+            kubectl_exec(self.config, service, command)
+            return
+        # Create a unique job name to deduplicate jobs and make it easier to find
+        # later. Logs of older jobs will remain available for some time.
+        job_name += "-" + datetime.now().strftime("%Y%m%d%H%M%S")
+
+        # Wait until all other jobs are completed
+        while True:
+            active_jobs = self.active_job_names()
+            if not active_jobs:
+                break
+            fmt.echo_info(
+                "Waiting for active jobs to terminate: {}".format(" ".join(active_jobs))
+            )
+            sleep(5)
+
+        # Configure job
+        job["metadata"]["name"] = job_name
+        job["metadata"].setdefault("labels", {})
+        job["metadata"]["labels"]["app.kubernetes.io/name"] = job_name
+        job["spec"]["template"]["spec"]["containers"][0]["args"] = [
+            "sh",
+            "-e",
+            "-c",
+            command,
+        ]
+        job["spec"]["backoffLimit"] = 1
+        job["spec"]["ttlSecondsAfterFinished"] = 3600
+        # Save patched job to "jobs.yml" file
+        with open(tutor_env.pathjoin(self.root, "k8s", "jobs.yml"), "w") as job_file:
+            serialize.dump(job, job_file)
+        # We cannot use the k8s API to create the job: configMap and volume names need
+        # to be found with the right suffixes.
+        utils.kubectl(
+            "apply",
+            "--kustomize",
+            tutor_env.pathjoin(self.root),
+            "--selector",
+            "app.kubernetes.io/name={}".format(job_name),
+        )
+
+        message = (
+            "Job {job_name} is running. To view the logs from this job, run:\n\n"
+            """    kubectl logs --namespace={namespace} --follow $(kubectl get --namespace={namespace} pods """
+            """--selector=job-name={job_name} -o=jsonpath="{{.items[0].metadata.name}}")\n\n"""
+            "Waiting for job completion..."
+        ).format(job_name=job_name, namespace=self.config["K8S_NAMESPACE"])
+        fmt.echo_info(message)
+
+        # Wait for completion
+        field_selector = "metadata.name={}".format(job_name)
+        while True:
+            jobs = K8sClients.instance().batch_api.list_namespaced_job(
+                self.config["K8S_NAMESPACE"], field_selector=field_selector
+            )
+            if not jobs.items:
+                continue
+            job = jobs.items[0]
+            if not job.status.active:
+                if job.status.succeeded:
+                    fmt.echo_info("Job {} successful.".format(job_name))
+                    break
+                if job.status.failed:
+                    raise exceptions.TutorError(
+                        "Job {} failed. View the job logs to debug this issue.".format(
+                            job_name
+                        )
+                    )
+            sleep(5)
 
 
 def kubectl_exec(config, service, command, attach=False):
     selector = "app.kubernetes.io/name={}".format(service)
-
-    # Find pod in runner deployment
-    wait_for_pod_ready(config, service)
-    fmt.echo_info("Finding pod name for {} deployment...".format(service))
-    pod = utils.check_output(
-        "kubectl",
-        "get",
-        *resource_selector(config, selector),
-        "pods",
-        "-o=jsonpath={.items[0].metadata.name}",
+    pods = K8sClients.instance().core_api.list_namespaced_pod(
+        namespace=config["K8S_NAMESPACE"], label_selector=selector
     )
+    if not pods.items:
+        raise exceptions.TutorError(
+            "Could not find an active pod for the {} service".format(service)
+        )
+    pod_name = pods.items[0].metadata.name
 
     # Run command
     attach_opts = ["-i", "-t"] if attach else []
@@ -215,7 +364,7 @@ def kubectl_exec(config, service, command, attach=False):
         *attach_opts,
         "--namespace",
         config["K8S_NAMESPACE"],
-        pod.decode(),
+        pod_name,
         "--",
         "sh",
         "-e",
diff --git a/tutor/config.py b/tutor/config.py
index ddd596033b291fbd4fb82b6d9f7bbe4e48a95571..2869bebcffdb1d769e322ccc3e00fbb3b58470f8 100644
--- a/tutor/config.py
+++ b/tutor/config.py
@@ -1,4 +1,3 @@
-import json
 import os
 
 from . import exceptions
@@ -128,6 +127,10 @@ def load_plugins(config, defaults):
             defaults[plugin.config_key(key)] = value
 
 
+def is_service_activated(config, service):
+    return config["ACTIVATE_" + service.upper()]
+
+
 def upgrade_obsolete(config):
     # Openedx-specific mysql passwords
     if "MYSQL_PASSWORD" in config:
diff --git a/tutor/scripts.py b/tutor/scripts.py
index 5513416dfc4fec5bd1da1816178ca29415ec82d3..acad5084b3bcc4eefb33570959e00378b0cdc7c6 100644
--- a/tutor/scripts.py
+++ b/tutor/scripts.py
@@ -1,5 +1,4 @@
 from . import env
-from . import exceptions
 from . import fmt
 from . import plugins
 
@@ -14,34 +13,23 @@ class BaseRunner:
         self.root = root
         self.config = config
 
-    def run(self, service, *path):
+    def run_job_from_template(self, service, *path):
         command = self.render(*path)
-        self.exec(service, command)
+        self.run_job(service, command)
 
     def render(self, *path):
         return env.render_file(self.config, *path).strip()
 
-    def exec(self, service, command):
+    def run_job(self, service, command):
         raise NotImplementedError
 
-    def check_service_is_activated(self, service):
-        if not self.is_activated(service):
-            raise exceptions.TutorError(
-                "This command may only be executed on the server where the {} is running".format(
-                    service
-                )
-            )
-
-    def is_activated(self, service):
-        return self.config["ACTIVATE_" + service.upper()]
-
     def iter_plugin_hooks(self, hook):
         yield from plugins.iter_hooks(self.config, hook)
 
 
 def initialise(runner):
     fmt.echo_info("Initialising all services...")
-    runner.run("mysql", "hooks", "mysql", "init")
+    runner.run_job_from_template("mysql", "hooks", "mysql", "init")
     for plugin_name, hook in runner.iter_plugin_hooks("pre-init"):
         for service in hook:
             fmt.echo_info(
@@ -49,17 +37,18 @@ def initialise(runner):
                     plugin_name, service
                 )
             )
-            runner.run(service, plugin_name, "hooks", service, "pre-init")
+            runner.run_job_from_template(
+                service, plugin_name, "hooks", service, "pre-init"
+            )
     for service in ["lms", "cms", "forum"]:
-        if runner.is_activated(service):
-            fmt.echo_info("Initialising {}...".format(service))
-            runner.run(service, "hooks", service, "init")
+        fmt.echo_info("Initialising {}...".format(service))
+        runner.run_job_from_template(service, "hooks", service, "init")
     for plugin_name, hook in runner.iter_plugin_hooks("init"):
         for service in hook:
             fmt.echo_info(
                 "Plugin {}: running init for service {}...".format(plugin_name, service)
             )
-            runner.run(service, plugin_name, "hooks", service, "init")
+            runner.run_job_from_template(service, plugin_name, "hooks", service, "init")
     fmt.echo_info("All services initialised.")
 
 
@@ -90,8 +79,7 @@ u.save()"
 
 
 def import_demo_course(runner):
-    runner.check_service_is_activated("cms")
-    runner.run("cms", "hooks", "cms", "importdemocourse")
+    runner.run_job_from_template("cms", "hooks", "cms", "importdemocourse")
 
 
 def set_theme(theme_name, domain_name, runner):
@@ -108,5 +96,4 @@ site.themes.all().delete()
 site.themes.create(theme_dir_name='{theme_name}')"
 """
     command = command.format(theme_name=theme_name, domain_name=domain_name)
-    runner.check_service_is_activated("lms")
-    runner.exec("lms", command)
+    runner.run_job("lms", command)
diff --git a/tutor/serialize.py b/tutor/serialize.py
index 99a29dadba21b3af77c27c9dbf088f71ea40672b..98b7bdb85699e11e7716faa11a5aba3552283dc7 100644
--- a/tutor/serialize.py
+++ b/tutor/serialize.py
@@ -7,6 +7,10 @@ def load(stream):
     return yaml.load(stream, Loader=yaml.SafeLoader)
 
 
+def load_all(stream):
+    return yaml.load_all(stream, Loader=yaml.SafeLoader)
+
+
 def dump(content, fileobj):
     yaml.dump(content, stream=fileobj, default_flow_style=False)
 
diff --git a/tutor/templates/k8s/deployments.yml b/tutor/templates/k8s/deployments.yml
index 9ea761405ff4e235a2e1539cd3fd0104685986ce..b3730b9c7f8110b36abc1d98057807345abca5e8 100644
--- a/tutor/templates/k8s/deployments.yml
+++ b/tutor/templates/k8s/deployments.yml
@@ -295,6 +295,7 @@ spec:
           persistentVolumeClaim:
             claimName: mongodb
 {% endif %}
+{% if ACTIVATE_MYSQL %}
 ---
 apiVersion: apps/v1
 kind: Deployment
@@ -316,12 +317,7 @@ spec:
       containers:
         - name: mysql
           image: {{ DOCKER_REGISTRY }}{{ DOCKER_IMAGE_MYSQL }}
-          {% if ACTIVATE_MYSQL %}
           args: ["mysqld", "--character-set-server=utf8", "--collation-server=utf8_general_ci"]
-          {% else %}
-          command: ["sh", "-e", "-c"]
-          args: ["echo 'ready'; while true; do sleep 60; done"]
-          {% endif %}
           env:
             - name: MYSQL_ROOT_PASSWORD
               valueFrom:
@@ -330,7 +326,6 @@ spec:
                   key: MYSQL_ROOT_PASSWORD
           ports:
             - containerPort: 3306
-          {% if ACTIVATE_MYSQL %}
           volumeMounts:
             - mountPath: /var/lib/mysql
               name: data
@@ -338,7 +333,7 @@ spec:
         - name: data
           persistentVolumeClaim:
             claimName: mysql
-            {% endif %}
+{% endif %}
 {% if ACTIVATE_SMTP %}
 ---
 apiVersion: apps/v1
diff --git a/tutor/templates/k8s/ingress.yml b/tutor/templates/k8s/ingress.yml
index 62c49fd4843388c67f50d5c53d4a64195516d65e..245605ad39d7420c5dfa61fccec39e96586683ab 100644
--- a/tutor/templates/k8s/ingress.yml
+++ b/tutor/templates/k8s/ingress.yml
@@ -1,11 +1,12 @@
 ---{% set hosts = [LMS_HOST, "preview." + LMS_HOST, CMS_HOST] %}
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1beta1
 kind: Ingress
 metadata:
   name: web
   labels:
     app.kubernetes.io/name: web
   annotations:
+    kubernetes.io/ingress.class: nginx
     nginx.ingress.kubernetes.io/proxy-body-size: 1000m
     {% if ACTIVATE_HTTPS%}kubernetes.io/tls-acme: "true"
     cert-manager.io/issuer: letsencrypt{% endif %}
@@ -22,9 +23,11 @@ spec:
   {% if ACTIVATE_HTTPS %}
   tls:
   - hosts:
-    {% for host in hosts %}
-    - {{ host }}{% endfor %}
-    {{ patch("k8s-ingress-tls-hosts")|indent(6) }}
+      {% for host in hosts %}
+      - {{ host }}{% endfor %}
+      {{ patch("k8s-ingress-tls-hosts")|indent(6) }}
+    # TODO maybe we should not take care of generating certificates ourselves
+    # and here just point to a tls secret
     secretName: letsencrypt
   {%endif%}
 {% if ACTIVATE_HTTPS %}
diff --git a/tutor/templates/k8s/jobs.yml b/tutor/templates/k8s/jobs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d5b5768b92023067c6b458172bc8db10810d9137
--- /dev/null
+++ b/tutor/templates/k8s/jobs.yml
@@ -0,0 +1,106 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: lms-job
+  labels:
+    app.kubernetes.io/component: job
+spec:
+  template:
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: lms
+        image: {{ DOCKER_REGISTRY }}{{ DOCKER_IMAGE_OPENEDX }}
+        volumeMounts:
+          - mountPath: /openedx/edx-platform/lms/envs/tutor/
+            name: settings-lms
+          - mountPath: /openedx/edx-platform/cms/envs/tutor/
+            name: settings-cms
+          - mountPath: /openedx/config
+            name: config
+      volumes:
+      - name: settings-lms
+        configMap:
+          name: openedx-settings-lms
+      - name: settings-cms
+        configMap:
+          name: openedx-settings-cms
+      - name: config
+        configMap:
+          name: openedx-config
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: cms-job
+  labels:
+    app.kubernetes.io/component: job
+spec:
+  template:
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: cms
+        image: {{ DOCKER_REGISTRY }}{{ DOCKER_IMAGE_OPENEDX }}
+        env:
+        - name: SERVICE_VARIANT
+          value: cms
+        volumeMounts:
+          - mountPath: /openedx/edx-platform/lms/envs/tutor/
+            name: settings-lms
+          - mountPath: /openedx/edx-platform/cms/envs/tutor/
+            name: settings-cms
+          - mountPath: /openedx/config
+            name: config
+      volumes:
+      - name: settings-lms
+        configMap:
+          name: openedx-settings-lms
+      - name: settings-cms
+        configMap:
+          name: openedx-settings-cms
+      - name: config
+        configMap:
+          name: openedx-config
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: mysql-job
+  labels:
+    app.kubernetes.io/component: job
+spec:
+  template:
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: mysql
+        image: {{ DOCKER_REGISTRY }}{{ DOCKER_IMAGE_MYSQL }}
+        command: []
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: forum-job
+  labels:
+    app.kubernetes.io/component: job
+spec:
+  template:
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: forum
+        image: {{ DOCKER_REGISTRY }}{{ DOCKER_IMAGE_FORUM }}
+        env:
+          - name: SEARCH_SERVER
+            value: "{{ ELASTICSEARCH_SCHEME }}://{{ ELASTICSEARCH_HOST }}:{{ ELASTICSEARCH_PORT }}"
+          - name: MONGODB_AUTH
+            value: "{% if MONGODB_USERNAME and MONGODB_PASSWORD %}{{ MONGODB_USERNAME }}:{{ MONGODB_PASSWORD }}@{% endif %}"
+          - name: MONGODB_HOST
+            value: "{{ MONGODB_HOST }}"
+          - name: MONGODB_PORT
+            value: "{{ MONGODB_PORT }}"
+
+{{ patch("k8s-jobs") }}
+
diff --git a/tutor/templates/kustomization.yml b/tutor/templates/kustomization.yml
index 04881b651e4c2fa492b731e209d67be3e0a6f798..8a5048b7278ee4e40b6abb6f75a3e2c6e9081417 100644
--- a/tutor/templates/kustomization.yml
+++ b/tutor/templates/kustomization.yml
@@ -4,7 +4,9 @@ kind: Kustomization
 resources:
 - k8s/namespace.yml
 - k8s/deployments.yml
+# TODO maybe we should not take care of ingress stuff and let the administrator do it
 - k8s/ingress.yml
+- k8s/jobs.yml
 - k8s/services.yml
 - k8s/volumes.yml
 {{ patch("kustomization-resources") }}
diff --git a/tutor/templates/local/docker-compose.jobs.yml b/tutor/templates/local/docker-compose.jobs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..57a30843fd131aef94aa6b61e9c524a94be13070
--- /dev/null
+++ b/tutor/templates/local/docker-compose.jobs.yml
@@ -0,0 +1,37 @@
+version: "3.7"
+services:
+
+    mysql-job:
+      image: {{ DOCKER_REGISTRY }}{{ DOCKER_IMAGE_MYSQL }}
+      entrypoint: []
+      command: ["echo", "done"]
+
+    lms-job:
+      image: {{ DOCKER_REGISTRY }}{{ DOCKER_IMAGE_OPENEDX }}
+      environment:
+        SERVICE_VARIANT: lms
+        SETTINGS: ${EDX_PLATFORM_SETTINGS:-tutor.production}
+      volumes:
+        - ../apps/openedx/settings/lms/:/openedx/edx-platform/lms/envs/tutor/:ro
+        - ../apps/openedx/settings/cms/:/openedx/edx-platform/cms/envs/tutor/:ro
+        - ../apps/openedx/config/:/openedx/config/:ro
+
+    cms-job:
+      image: {{ DOCKER_REGISTRY }}{{ DOCKER_IMAGE_OPENEDX }}
+      environment:
+        SERVICE_VARIANT: cms
+        SETTINGS: ${EDX_PLATFORM_SETTINGS:-tutor.production}
+      volumes:
+        - ../apps/openedx/settings/lms/:/openedx/edx-platform/lms/envs/tutor/:ro
+        - ../apps/openedx/settings/cms/:/openedx/edx-platform/cms/envs/tutor/:ro
+        - ../apps/openedx/config/:/openedx/config/:ro
+
+    forum-job:
+      image: {{ DOCKER_REGISTRY }}{{ DOCKER_IMAGE_FORUM }}
+      environment:
+        SEARCH_SERVER: "{{ ELASTICSEARCH_SCHEME }}://{{ ELASTICSEARCH_HOST }}:{{ ELASTICSEARCH_PORT }}"
+        MONGODB_AUTH: "{% if MONGODB_USERNAME and MONGODB_PASSWORD %}{{ MONGODB_USERNAME }}:{{ MONGODB_PASSWORD }}@{% endif %}"
+        MONGODB_HOST: "{{ MONGODB_HOST }}"
+        MONGODB_PORT: "{{ MONGODB_PORT }}"
+
+    {{ patch("local-docker-compose-jobs-services")|indent(4) }}
diff --git a/tutor/templates/local/docker-compose.yml b/tutor/templates/local/docker-compose.yml
index 977d2b9f46df3d2d39682dc0a28267b6cda74ef1..1a802ea08bae8a974f718a9f26c16eec6f30578e 100644
--- a/tutor/templates/local/docker-compose.yml
+++ b/tutor/templates/local/docker-compose.yml
@@ -19,18 +19,15 @@ services:
       - ../../data/mongodb:/data/db
   {% endif %}
 
+  {% if ACTIVATE_MYSQL %}
   mysql:
     image: {{ DOCKER_REGISTRY }}{{ DOCKER_IMAGE_MYSQL }}
-    {% if ACTIVATE_MYSQL %}
     command: mysqld --character-set-server=utf8 --collation-server=utf8_general_ci
-    {% else %}
-    entrypoint: ["sh", "-e", "-c"]
-    command: ["echo 'ready'; while true; do sleep 60; done"]
-    {% endif %}
     restart: unless-stopped
     volumes:
       - ../../data/mysql:/var/lib/mysql
     env_file: ../apps/mysql/auth.env
+  {% endif %}
 
   {% if ACTIVATE_ELASTICSEARCH %}
   elasticsearch: