diff --git a/autocomplete_multi_models/__init__.py b/autocomplete_multi_models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/autocomplete_multi_models/admin.py b/autocomplete_multi_models/admin.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8685714a0832bce46d9307e95de00a3a3a8d51d
--- /dev/null
+++ b/autocomplete_multi_models/admin.py
@@ -0,0 +1,10 @@
+from django.contrib import admin
+
+# Register your models here.
+from autocomplete_multi_models import models
+
+
+@admin.register(models.IndexedWord)
+class IndexedWordAdmin(admin.ModelAdmin):
+    # Minimal admin: the index stores bare words only, so listing and
+    # searching on ``word`` is all that is useful.
+    list_display = ("word",)
+    search_fields = ("word",)
diff --git a/autocomplete_multi_models/apps.py b/autocomplete_multi_models/apps.py
new file mode 100644
index 0000000000000000000000000000000000000000..eac91f3e90d15381cf2096ec3906c9db7aae60a8
--- /dev/null
+++ b/autocomplete_multi_models/apps.py
@@ -0,0 +1,23 @@
+from functools import partial
+
+from django.apps import AppConfig
+from django.db.models import signals
+
+from autocomplete_multi_models import utils
+
+
+class AutocompleteMultiModelsConfig(AppConfig):
+    default_auto_field = 'django.db.models.BigAutoField'
+    name = 'autocomplete_multi_models'
+
+    __indexed_fields = None
+
+    def ready(self):
+        from . import signals as my_signals
+
+        utils.init_from_settings()
+
+        for model, field_names in utils.get_indexed_fields().items():
+            sender = model
+            signals.post_save.connect(partial(my_signals.instance_update, field_names=field_names), sender=sender)
+            signals.pre_delete.connect(partial(my_signals.instance_delete, field_names=field_names), sender=sender)
diff --git a/autocomplete_multi_models/business_process.py b/autocomplete_multi_models/business_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..763c7547ae495fcc8b68b3b8e20ff31a84425f7c
--- /dev/null
+++ b/autocomplete_multi_models/business_process.py
@@ -0,0 +1,90 @@
+import re
+from typing import Optional, List
+
+from django.contrib.postgres.lookups import Unaccent
+from django.contrib.postgres.search import TrigramSimilarity
+from django.db import connection
+from django.db.models import Exists, OuterRef, Case, When, Value, F
+from django.db.transaction import atomic
+
+from autocomplete_multi_models import utils, models
+
+_pattern = re.compile("[^\\w\\d]")
+
+_AUTOCOMPLETE_MIN_LENGTH = utils.DEFAULT_AUTOCOMPLETE_MIN_LENGTH
+_AUTOCOMPLETE_MIN_SIMILARITY = utils.DEFAULT_AUTOCOMPLETE_MIN_SIMILARITY
+_AUTOCOMPLETE_LIMIT = utils.DEFAULT_AUTOCOMPLETE_LIMIT
+
+
+def get_setting_from_storage(key, default):
+    # Default (non-persistent) storage backend: reports that a rebuild is
+    # always needed — returns True only for the REBUILD_NEEDED key — and
+    # deliberately ignores ``default``. Replaced at runtime by
+    # utils.init_from_settings() when the project configures
+    # AUTOCOMPLETE_PERSISTENT_VARIABLE_GETTER_SETTER.
+    return key == utils.REBUILD_NEEDED
+
+
+def set_setting_in_storage(key, value):
+    # Default (non-persistent) storage backend: writes are discarded (see
+    # get_setting_from_storage). Overridden by utils.init_from_settings().
+    pass
+
+
+def split_string(value):
+    return _pattern.split(value)
+
+
+@atomic
+def rebuild_index():
+    """Drop and rebuild the whole word index from every configured field.
+
+    Runs in a single transaction so concurrent readers never observe a
+    half-built (or empty) index.
+    """
+    models.IndexedWord.objects.all().delete()
+    for model, field_names in utils.get_indexed_fields().items():
+        # ``only`` restricts the SELECT to the columns we actually index.
+        for instance in model.objects.only(*field_names):
+            add_instance_to_index(instance, field_names)
+    clean_duplicate()
+
+
+def clean_duplicate():
+    # Keep exactly one row per distinct word (case-insensitive): a row is
+    # deleted when another row with the same word and a larger pk exists,
+    # i.e. the copy with the highest pk survives.
+    models.IndexedWord.objects.annotate(
+        is_duplicate=Exists(
+            models.IndexedWord.objects.filter(
+                word__iexact=OuterRef('word'),
+                pk__gt=OuterRef('pk'),
+            )
+        )
+    ).filter(is_duplicate=True).delete()
+
+
+def add_instance_to_index(instance, field_names: List[str]):
+    """Index the text content of each listed field of *instance*."""
+    for field_name in field_names:
+        add_text_to_index(getattr(instance, field_name))
+
+
+def add_text_to_index(value: str):
+    if value is None or value == '':
+        return
+    objects = []
+    for word in split_string(value):
+        len_word = len(word)
+        if len_word < _AUTOCOMPLETE_MIN_LENGTH or word.isdecimal() or len_word > 64:
+            continue
+        objects.append(models.IndexedWord(word=word))
+    models.IndexedWord.objects.bulk_create(objects)
+
+
+def get_closest_matching_words(word: str, limit: Optional[int] = None, min_similarity: Optional[float] = None):
+    """Return IndexedWord rows similar to *word*, best match first.
+
+    Each row is annotated with ``similarity``: trigram similarity between the
+    un-accented stored word and the un-accented search word, plus a 1.0 bonus
+    when the stored word starts with the search word. ``limit`` and
+    ``min_similarity`` fall back to the module-level configured values; a
+    negative ``limit`` returns the full (unsliced) queryset.
+    """
+    # remove accent from the searched word with postgres
+    with connection.cursor() as cursor:
+        cursor.execute("SELECT UNACCENT(%s) as value", [word])
+        word = cursor.fetchone()[0]
+    if limit is None:
+        limit = _AUTOCOMPLETE_LIMIT
+    if min_similarity is None:
+        min_similarity = _AUTOCOMPLETE_MIN_SIMILARITY
+    qs = models.IndexedWord.objects
+    # search on un-accented word
+    qs = qs.annotate(ac_word=Unaccent('word'))
+    # get the trigram similarity
+    qs = qs.annotate(ac_word_s_tri=TrigramSimilarity('ac_word', word))
+    # test if the word start with the searched word, if so give a bonus
+    qs = qs.annotate(ac_word_bonus=Case(When(ac_word__startswith=word, then=Value(1.0)), default=Value(0.0)))
+    # sum similarity and bonus
+    qs = qs.annotate(similarity=F('ac_word_s_tri') + F('ac_word_bonus'))
+    # filter by min similarity and order it
+    qs = qs.filter(similarity__gt=min_similarity).order_by('-similarity')
+    if limit < 0:  # allows to have all results
+        return qs
+    return qs[:limit]
diff --git a/autocomplete_multi_models/expose_with/__init__.py b/autocomplete_multi_models/expose_with/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/autocomplete_multi_models/expose_with/graphene/__init__.py b/autocomplete_multi_models/expose_with/graphene/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/autocomplete_multi_models/expose_with/graphene/example.md b/autocomplete_multi_models/expose_with/graphene/example.md
new file mode 100644
index 0000000000000000000000000000000000000000..64e1ccbc2a901056cdc4cf2bd56649317459e57c
--- /dev/null
+++ b/autocomplete_multi_models/expose_with/graphene/example.md
@@ -0,0 +1,85 @@
+```
+{
+  autocomplete(q: "Homer") {
+    edges {
+      node {
+        word,
+        similarity
+      }
+    }
+  }
+}
+```
+
+returns
+
+```json
+{
+  "data": {
+    "autocomplete": {
+      "edges": [
+        {
+          "node": {
+            "word": "home",
+            "similarity": 0.5714286
+          }
+        },
+        {
+          "node": {
+            "word": "homodimer",
+            "similarity": 0.45454547
+          }
+        },
+        {
+          "node": {
+            "word": "homo",
+            "similarity": 0.375
+          }
+        },
+        {
+          "node": {
+            "word": "homemade",
+            "similarity": 0.36363637
+          }
+        },
+        {
+          "node": {
+            "word": "HMMER",
+            "similarity": 0.33333334
+          }
+        },
+        {
+          "node": {
+            "word": "homme",
+            "similarity": 0.33333334
+          }
+        },
+        {
+          "node": {
+            "word": "homeostatic",
+            "similarity": 0.2857143
+          }
+        },
+        {
+          "node": {
+            "word": "homeodomain",
+            "similarity": 0.2857143
+          }
+        },
+        {
+          "node": {
+            "word": "homeostasis",
+            "similarity": 0.2857143
+          }
+        },
+        {
+          "node": {
+            "word": "monomer",
+            "similarity": 0.27272728
+          }
+        }
+      ]
+    }
+  }
+}
+```
\ No newline at end of file
diff --git a/autocomplete_multi_models/expose_with/graphene/schema.py b/autocomplete_multi_models/expose_with/graphene/schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..9930dd449743a87082ac0a61878479da9e5fa9c2
--- /dev/null
+++ b/autocomplete_multi_models/expose_with/graphene/schema.py
@@ -0,0 +1,31 @@
+import django_filters
+import graphene
+from graphene_django.filter import DjangoFilterConnectionField
+from graphene_django.types import DjangoObjectType
+
+import autocomplete_multi_models.business_process
+import autocomplete_multi_models.models
+
+
+class IndexedWordFilter(django_filters.FilterSet):
+    """FilterSet whose only real filter, ``q``, runs the similarity search."""
+
+    q = django_filters.CharFilter(method='resolve_q')
+
+    class Meta:
+        model = autocomplete_multi_models.models.IndexedWord
+        exclude = ('id',)
+
+    def resolve_q(self, queryset, name, value, *args, **kwargs):
+        # The incoming queryset is discarded: the search builds its own
+        # annotated queryset, ordered by similarity.
+        return autocomplete_multi_models.business_process.get_closest_matching_words(value)
+
+
+class IndexedWordNode(DjangoObjectType):
+    # ``similarity`` is an annotation added by the search queryset, not a
+    # model field, hence the explicit declaration.
+    similarity = graphene.Field(graphene.Float)
+
+    class Meta:
+        model = autocomplete_multi_models.models.IndexedWord
+        interfaces = (graphene.Node,)
+
+
+class FindClosestWordsQuery(graphene.ObjectType):
+    """Root-query mixin exposing ``autocomplete(q: ...)`` as a relay connection."""
+
+    autocomplete = DjangoFilterConnectionField(IndexedWordNode, filterset_class=IndexedWordFilter)
diff --git a/autocomplete_multi_models/expose_with/rest_framework/__init__.py b/autocomplete_multi_models/expose_with/rest_framework/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/autocomplete_multi_models/expose_with/rest_framework/serializers.py b/autocomplete_multi_models/expose_with/rest_framework/serializers.py
new file mode 100644
index 0000000000000000000000000000000000000000..04064ce7aeda3619b56bec4cf21ae38f404bab41
--- /dev/null
+++ b/autocomplete_multi_models/expose_with/rest_framework/serializers.py
@@ -0,0 +1,30 @@
+from rest_framework import serializers
+
+from autocomplete_multi_models import models
+
+
+class IndexedWordSerializer(serializers.ModelSerializer):
+    class Meta:
+        model = models.IndexedWord
+        fields = [
+            'word',
+            'similarity',
+        ]
+
+    similarity = serializers.FloatField()
+
+
+class SearchSerializer(serializers.Serializer):
+    """Validate autocomplete search parameters: q, limit, min_similarity."""
+
+    q = serializers.CharField(
+        min_length=2,
+        required=True,
+    )
+    limit = serializers.IntegerField(
+        min_value=1,
+        default=10,
+        required=False,
+    )
+    # None means "use the server-side configured minimum similarity".
+    min_similarity = serializers.FloatField(
+        required=False,
+        default=None,
+    )
diff --git a/autocomplete_multi_models/expose_with/rest_framework/urls.py b/autocomplete_multi_models/expose_with/rest_framework/urls.py
new file mode 100644
index 0000000000000000000000000000000000000000..52043d2680388bc5d7cd1bbb4b05ebf0697ce6d9
--- /dev/null
+++ b/autocomplete_multi_models/expose_with/rest_framework/urls.py
@@ -0,0 +1,7 @@
+from django.urls import path
+
+from autocomplete_multi_models.expose_with.rest_framework.views import FindClosestWords
+
+urlpatterns = [
+    path('autocomplete/', FindClosestWords.as_view()),
+]
diff --git a/autocomplete_multi_models/expose_with/rest_framework/views.py b/autocomplete_multi_models/expose_with/rest_framework/views.py
new file mode 100644
index 0000000000000000000000000000000000000000..48140a0eee2bea3c0c76bfbc0373320e3d90b402
--- /dev/null
+++ b/autocomplete_multi_models/expose_with/rest_framework/views.py
@@ -0,0 +1,27 @@
+from rest_framework import views, response, status
+
+from autocomplete_multi_models import business_process
+from autocomplete_multi_models.expose_with.rest_framework import serializers
+
+
+class FindClosestWords(views.APIView):
+    def get(self, request, format=None):
+        return self.do_the_work(request.GET)
+
+    def post(self, request, format=None):
+        return self.do_the_work(request.data)
+
+    def do_the_work(self, data):
+        # {"q":"rna","min_similarity":0.1,"limit":8}
+        search_serializer = serializers.SearchSerializer(data=data)
+        if not search_serializer.is_valid():
+            return response.Response(search_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
+        results_serializer = serializers.IndexedWordSerializer(
+            business_process.get_closest_matching_words(
+                search_serializer.data['q'],
+                search_serializer.data['limit'],
+                search_serializer.data['min_similarity'],
+            ),
+            many=True,
+        )
+        return response.Response(results_serializer.data)
diff --git a/autocomplete_multi_models/management/__init__.py b/autocomplete_multi_models/management/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/autocomplete_multi_models/management/commands/__init__.py b/autocomplete_multi_models/management/commands/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/autocomplete_multi_models/management/commands/makeautocompleteindex.py b/autocomplete_multi_models/management/commands/makeautocompleteindex.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd389266acbe05527259684b75491ca032c46e24
--- /dev/null
+++ b/autocomplete_multi_models/management/commands/makeautocompleteindex.py
@@ -0,0 +1,21 @@
+import time
+from django.core.management import BaseCommand
+from django.db import transaction
+
+from autocomplete_multi_models import business_process, models, utils
+
+
+class Command(BaseCommand):
+    def add_arguments(self, parser):
+        parser.add_argument('--forced', action='store_true')
+
+    @transaction.atomic
+    def handle(self, *args, **options):
+        if not (options['forced'] or business_process.get_setting_from_storage(utils.REBUILD_NEEDED, True)):
+            return
+
+        ts = time.time()
+        business_process.rebuild_index()
+        te = time.time()
+        print(f"Index rebuild in {int((te - ts) * 100) / 100}s, it contains {models.IndexedWord.objects.count()} words")
+        business_process.set_setting_in_storage(utils.REBUILD_NEEDED, False)
diff --git a/autocomplete_multi_models/migrations/0001_initial.py b/autocomplete_multi_models/migrations/0001_initial.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ee92cee65a7df3beb265ca1ea5a6fc824eee1bd
--- /dev/null
+++ b/autocomplete_multi_models/migrations/0001_initial.py
@@ -0,0 +1,24 @@
+# Generated by Django 3.2.9 on 2022-02-11 11:47
+from django.contrib.postgres.operations import TrigramExtension, BtreeGinExtension, UnaccentExtension
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+    # Installs the postgres extensions needed by the search (trigram
+    # similarity, GIN-on-scalar indexes, accent stripping), then creates the
+    # word table.
+    # NOTE(review): CREATE EXTENSION needs superuser rights on the database —
+    # confirm the migration user has them.
+
+    initial = True
+
+    dependencies = [
+    ]
+
+    operations = [
+        TrigramExtension(),
+        BtreeGinExtension(),
+        UnaccentExtension(),
+        migrations.CreateModel(
+            name='IndexedWord',
+            fields=[
+                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('word', models.CharField(db_index=True, max_length=64)),
+            ],
+        ),
+    ]
diff --git a/autocomplete_multi_models/migrations/__init__.py b/autocomplete_multi_models/migrations/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/autocomplete_multi_models/models.py b/autocomplete_multi_models/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..679c0a1a484731ce4c9958fc93d3810041412d03
--- /dev/null
+++ b/autocomplete_multi_models/models.py
@@ -0,0 +1,17 @@
+import django.contrib.postgres.indexes
+import django.db.models
+
+
+# Create your models here.
+class IndexedWord(django.db.models.Model):
+    indexes = [
+        django.contrib.postgres.indexes.GinIndex(fields=['word']),
+    ]
+    word = django.db.models.CharField(
+        max_length=64,
+        db_index=True,
+        null=False,
+    )
+
+    def __str__(self):
+        return self.word
diff --git a/autocomplete_multi_models/signals.py b/autocomplete_multi_models/signals.py
new file mode 100644
index 0000000000000000000000000000000000000000..49299c48f665cdae0b8fb8f1c0eaf56d04eedccf
--- /dev/null
+++ b/autocomplete_multi_models/signals.py
@@ -0,0 +1,10 @@
+from autocomplete_multi_models import business_process, utils
+
+
+def instance_update(sender, instance, field_names, **kwargs):
+    # post_save handler: index the words of the instance's configured fields,
+    # then collapse any duplicates this may have introduced.
+    business_process.add_instance_to_index(instance, field_names)
+    business_process.clean_duplicate()
+
+
+def instance_delete(sender, instance, field_names, **kwargs):
+    # pre_delete handler: a deleted instance's words cannot be un-indexed
+    # cheaply (other instances may still contain them), so just flag that a
+    # full rebuild is needed.
+    business_process.set_setting_in_storage(utils.REBUILD_NEEDED, True)
diff --git a/autocomplete_multi_models/tests/__init__.py b/autocomplete_multi_models/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/autocomplete_multi_models/tests/settings_storage_file_based.py b/autocomplete_multi_models/tests/settings_storage_file_based.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ab198b6eb235e296ca19531f9f08705dc7bec4f
--- /dev/null
+++ b/autocomplete_multi_models/tests/settings_storage_file_based.py
@@ -0,0 +1,24 @@
+import json
+
+
+def get_data():
+    try:
+        with open('/tmp/autocomplete_multi_models.json', 'r') as f:
+            return json.load(f)
+    except OSError:
+        return dict()
+
+
+def save_data(data):
+    """Overwrite the scratch file with *data* serialized as pretty-printed JSON."""
+    with open('/tmp/autocomplete_multi_models.json', 'w') as f:
+        return json.dump(data, f, indent=4)
+
+
+def get_fcn(key, default):
+    # Getter half of the file-based persistent-settings backend.
+    return get_data().get(key, default)
+
+
+def set_fcn(key, value):
+    # Setter half of the file-based persistent-settings backend:
+    # read-modify-write the whole JSON file (not safe under concurrency;
+    # fine for tests).
+    data = get_data()
+    data[key] = value
+    save_data(data)
diff --git a/autocomplete_multi_models/tests/test_business_process.py b/autocomplete_multi_models/tests/test_business_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..068d5d833156f884291d24e26e1738f9b94a4003
--- /dev/null
+++ b/autocomplete_multi_models/tests/test_business_process.py
@@ -0,0 +1,147 @@
+import logging
+
+from django.conf import settings
+from django.core import management
+from django.test import TestCase, override_settings
+
+from autocomplete_multi_models import business_process, models, utils, signals
+from autocomplete_multi_models.tests import test_helpers
+
+logger = logging.getLogger(__name__)
+
+
+class AutoCompleteTestCase(TestCase):
+    """Checks of word splitting, de-duplication and accent-insensitive search."""
+
+    def test_unaccent_ok(self):
+        # Two distinct words (one containing an accented character) are both
+        # kept after de-duplication.
+        business_process.add_text_to_index("azertyêazerty azertyaezerty")
+        business_process.clean_duplicate()
+        self.assertEqual(models.IndexedWord.objects.count(), 2)
+
+    def test_split_ok(self):
+        # All separators split correctly; the repeated word collapses to one.
+        business_process.add_text_to_index("abc (abc) abc|abc,,,,]]]abc")
+        business_process.clean_duplicate()
+        self.assertEqual(models.IndexedWord.objects.count(), 1)
+
+    def test_case_ignored(self):
+        # De-duplication is case-insensitive (word__iexact in clean_duplicate).
+        business_process.add_text_to_index("Nous nous")
+        business_process.clean_duplicate()
+        self.assertEqual(models.IndexedWord.objects.count(), 1)
+
+    def test_init_from_settings_fails(self):
+        # Each misconfigured getter/setter pair must fail loudly.
+        # Setter same as getter -> the round-trip self-test assertion fails.
+        settings.AUTOCOMPLETE_PERSISTENT_VARIABLE_GETTER_SETTER = (
+            "autocomplete_multi_models.tests.settings_storage_file_based.get_fcn",
+            "autocomplete_multi_models.tests.settings_storage_file_based.get_fcn",
+        )
+        self.assertRaises(AssertionError, utils.init_from_settings)
+        # Setter with the wrong arity -> TypeError when called.
+        settings.AUTOCOMPLETE_PERSISTENT_VARIABLE_GETTER_SETTER = (
+            "autocomplete_multi_models.tests.settings_storage_file_based.get_fcn",
+            "autocomplete_multi_models.tests.settings_storage_file_based.get_data",
+        )
+        self.assertRaises(TypeError, utils.init_from_settings)
+        settings.AUTOCOMPLETE_PERSISTENT_VARIABLE_GETTER_SETTER = (
+            "autocomplete_multi_models.tests.settings_storage_file_based.get_fcn",
+            "autocomplete_multi_models.tests.settings_storage_file_based.save_data",
+        )
+        self.assertRaises(TypeError, utils.init_from_settings)
+        # Dotted path that does not exist -> ImportError from import_string.
+        settings.AUTOCOMPLETE_PERSISTENT_VARIABLE_GETTER_SETTER = (
+            "autocomplete_multi_models.tests.settings_storage_file_based.get_fcn",
+            "autocomplete_multi_models.tests.settings_storage_file_based.blabla",
+        )
+        self.assertRaises(ImportError, utils.init_from_settings)
+        settings.AUTOCOMPLETE_PERSISTENT_VARIABLE_GETTER_SETTER = None
+
+    def test_accent_are_ignored(self):
+        # Accented and unaccented variants must score identically, whichever
+        # side of the comparison carries the accent.
+        models.IndexedWord.objects.create(word="ARNtoto")
+        models.IndexedWord.objects.create(word="ÄRNtoto")
+        models.IndexedWord.objects.create(word="RNtoto")
+
+        qs = business_process.get_closest_matching_words("ARN", limit=-1, min_similarity=-1)
+        self.assertEqual(qs.get(word="ARNtoto").similarity, qs.get(word="ÄRNtoto").similarity)
+
+        qs = business_process.get_closest_matching_words("ÄRN", limit=-1, min_similarity=-1)
+        self.assertEqual(qs.get(word="ARNtoto").similarity, qs.get(word="ÄRNtoto").similarity)
+
+        qs = business_process.get_closest_matching_words("ÄRNtoto", limit=-1, min_similarity=-1)
+        self.assertGreater(qs.get(word="ARNtoto").similarity, qs.get(word="RNtoto").similarity)
+
+
+@override_settings(AUTOCOMPLETE_MIN_LENGTH=1)
+class MinLength1(test_helpers.ChangeAutoCompleteSettingsTestCase):
+    def test_it(self):
+        # All four words meet the 1-character minimum.
+        business_process.add_text_to_index("a bb ccc ddd")
+        self.assertEqual(models.IndexedWord.objects.count(), 4)
+
+
+@override_settings(AUTOCOMPLETE_MIN_LENGTH=2)
+class MinLength2(test_helpers.ChangeAutoCompleteSettingsTestCase):
+    def test_it(self):
+        # "a" is dropped by the 2-character minimum.
+        business_process.add_text_to_index("a bb ccc ddd")
+        self.assertEqual(models.IndexedWord.objects.count(), 3)
+
+
+@override_settings(AUTOCOMPLETE_MIN_LENGTH=3)
+class MinLength3(test_helpers.ChangeAutoCompleteSettingsTestCase):
+    def test_it(self):
+        # "a" and "bb" are dropped by the 3-character minimum.
+        business_process.add_text_to_index("a bb ccc ddd")
+        self.assertEqual(models.IndexedWord.objects.count(), 2)
+
+
+@override_settings(AUTOCOMPLETE_PERSISTENT_VARIABLE_GETTER_SETTER=None)
+class NeedRebuildDefaultBehaviorTestCase(test_helpers.ChangeAutoCompleteSettingsTestCase):
+    """Without persistent storage the rebuild flag always reads as True."""
+
+    def test_it(self):
+        self.assertTrue(business_process.get_setting_from_storage(utils.REBUILD_NEEDED, True))
+        # Writes go nowhere: the flag still reads True afterwards.
+        business_process.set_setting_in_storage(utils.REBUILD_NEEDED, False)
+        self.assertTrue(business_process.get_setting_from_storage(utils.REBUILD_NEEDED, True))
+        business_process.set_setting_in_storage(utils.REBUILD_NEEDED, True)
+        self.assertTrue(business_process.get_setting_from_storage(utils.REBUILD_NEEDED, True))
+        management.call_command('makeautocompleteindex')
+        self.assertTrue(business_process.get_setting_from_storage(utils.REBUILD_NEEDED, True))
+        self.assertEqual(models.IndexedWord.objects.count(), 0)
+
+
+@override_settings(
+    AUTOCOMPLETE_PERSISTENT_VARIABLE_GETTER_SETTER=(
+        "autocomplete_multi_models.tests.settings_storage_file_based.get_fcn",
+        "autocomplete_multi_models.tests.settings_storage_file_based.set_fcn",
+    ),
+)
+class NeedRebuildFileSettingsBasedTestCase(test_helpers.ChangeAutoCompleteSettingsTestCase):
+    """Rebuild flag round-trips through the file-based storage backend."""
+
+    def test_it(self):
+        business_process.set_setting_in_storage(utils.REBUILD_NEEDED, "eeeeee")
+        business_process.set_setting_in_storage(utils.REBUILD_NEEDED, False)
+        self.assertFalse(business_process.get_setting_from_storage(utils.REBUILD_NEEDED, True))
+        business_process.set_setting_in_storage(utils.REBUILD_NEEDED, True)
+
+        # Flag set -> the command rebuilds and clears it.
+        models.IndexedWord.objects.create(word="TOTO")
+        self.assertTrue(business_process.get_setting_from_storage(utils.REBUILD_NEEDED, True))
+        management.call_command('makeautocompleteindex')
+        self.assertFalse(business_process.get_setting_from_storage(utils.REBUILD_NEEDED, False))
+        self.assertEqual(models.IndexedWord.objects.count(), 0)
+
+        # Flag cleared -> the command is a no-op and the word survives.
+        models.IndexedWord.objects.create(word="TOTO")
+        business_process.set_setting_in_storage(utils.REBUILD_NEEDED, False)
+        management.call_command('makeautocompleteindex')
+        self.assertEqual(models.IndexedWord.objects.count(), 1)
+
+        # --forced rebuilds even though the flag is cleared.
+        business_process.set_setting_in_storage(utils.REBUILD_NEEDED, False)
+        management.call_command('makeautocompleteindex', forced=True)
+        self.assertEqual(models.IndexedWord.objects.count(), 0)
+
+        # The delete handler sets the flag again.
+        signals.instance_delete(sender=None, instance=None, field_names=[])
+        self.assertTrue(business_process.get_setting_from_storage(utils.REBUILD_NEEDED, True))
+
+
+class WithTextModelTestCase(test_helpers.WithTextModelTestCase):
+    """Exercise the signal handlers directly against the throw-away test model."""
+
+    def test_unmanaged_model(self):
+        # Sanity check: the table created by hand in setUp() is usable.
+        with self.assertNumQueries(num=3):
+            self.assertEqual(0, test_helpers.WithTextModel.objects.all().count())
+            test_helpers.WithTextModel.objects.create(text="bla")
+            self.assertEqual(1, test_helpers.WithTextModel.objects.all().count())
+
+    def test_signals_instance_update(self):
+        # Creating the row alone indexes nothing (this model is not among the
+        # configured targets); calling the handler directly does.
+        o = test_helpers.WithTextModel.objects.create(text="poney")
+        self.assertEqual(0, models.IndexedWord.objects.count())
+        signals.instance_update(sender=test_helpers.WithTextModel, instance=o, field_names=["text"])
+        self.assertEqual(1, models.IndexedWord.objects.count())
+
+        # The delete handler only flags a rebuild; the indexed word remains.
+        signals.instance_delete(sender=test_helpers.WithTextModel, instance=o, field_names=["text"])
+        self.assertEqual(1, models.IndexedWord.objects.count())
diff --git a/autocomplete_multi_models/tests/test_helpers.py b/autocomplete_multi_models/tests/test_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..8739e7545d53184ec075aabb58a26ca4c901ca75
--- /dev/null
+++ b/autocomplete_multi_models/tests/test_helpers.py
@@ -0,0 +1,43 @@
+from django.db import connection
+from django.db import models
+from django.test import TestCase
+
+from autocomplete_multi_models import utils
+
+
+class ChangeAutoCompleteSettingsTestCase(TestCase):
+    class Meta:
+        abstract = True
+
+    def setUp(self) -> None:
+        utils.init_from_settings()  # needed in test as app in only initialized once for test
+
+
+class WithTextModel(models.Model):
+    """Throw-away model used to exercise the indexing signal handlers in tests."""
+
+    text = models.CharField(max_length=64)
+
+    class Meta:
+        # Unmanaged: no migrations exist for it; the table is created and
+        # dropped by WithTextModelTestCase below.
+        managed = False
+        db_table = 'unmanaged_with_text_table'
+
+    def __str__(self):
+        return self.text
+
+
+class WithTextModelTestCase(TestCase):
+    """TestCase that creates the unmanaged WithTextModel table around each test."""
+
+    def setUp(self):
+        super().setUp()
+
+        # Unmanaged models have no migrations, so create the table by hand.
+        with connection.schema_editor() as schema_editor:
+            schema_editor.create_model(WithTextModel)
+
+            if WithTextModel._meta.db_table not in connection.introspection.table_names():
+                raise ValueError(
+                    "Table `{table_name}` is missing in test database.".format(table_name=WithTextModel._meta.db_table)
+                )
+
+    def tearDown(self):
+        super().tearDown()
+
+        # Drop the hand-made table so it does not leak into other test cases.
+        with connection.schema_editor() as schema_editor:
+            schema_editor.delete_model(WithTextModel)
diff --git a/autocomplete_multi_models/utils.py b/autocomplete_multi_models/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6931cc0b426cde8666b6e9c001f0900a13d3c58f
--- /dev/null
+++ b/autocomplete_multi_models/utils.py
@@ -0,0 +1,73 @@
+from django.conf import settings
+from django.utils.module_loading import import_string
+from django.apps import apps
+
+DEFAULT_AUTOCOMPLETE_MIN_LENGTH = 4
+DEFAULT_AUTOCOMPLETE_MIN_SIMILARITY = 0.3
+DEFAULT_AUTOCOMPLETE_LIMIT = 10
+REBUILD_NEEDED = "is_autocomplete_multi_models_rebuild_needed"
+
+
+def get_indexed_fields():
+    __indexed_fields = dict()
+    for app_name, model_name, field_name in settings.AUTOCOMPLETE_TARGET_FIELDS:
+        __indexed_fields.setdefault(apps.get_model(app_name, model_name), []).append(field_name)
+    return __indexed_fields
+
+
+def init_from_settings():
+    """Read the AUTOCOMPLETE_* settings and push them into business_process.
+
+    Also installs the optional persistent getter/setter pair after a quick
+    round-trip self-test of the two configured functions.
+    """
+    # Imported here (not at module level) to avoid a circular import:
+    # business_process imports this module for its defaults.
+    from . import business_process
+
+    business_process._AUTOCOMPLETE_MIN_LENGTH = getattr(
+        settings,
+        'AUTOCOMPLETE_MIN_LENGTH',
+        DEFAULT_AUTOCOMPLETE_MIN_LENGTH,
+    )
+    business_process._AUTOCOMPLETE_MIN_SIMILARITY = getattr(
+        settings,
+        'AUTOCOMPLETE_MIN_SIMILARITY',
+        DEFAULT_AUTOCOMPLETE_MIN_SIMILARITY,
+    )
+    business_process._AUTOCOMPLETE_LIMIT = getattr(
+        settings,
+        'AUTOCOMPLETE_LIMIT',
+        DEFAULT_AUTOCOMPLETE_LIMIT,
+    )
+    get_set_names = getattr(settings, 'AUTOCOMPLETE_PERSISTENT_VARIABLE_GETTER_SETTER', None)
+    if get_set_names is not None:
+        get_fcn = import_string(get_set_names[0])
+        set_fcn = import_string(get_set_names[1])
+
+        # Round-trip self-test of the configured storage pair.
+        # NOTE(review): these asserts are stripped under ``python -O``;
+        # raising ImproperlyConfigured would be more robust, but the test
+        # suite currently expects AssertionError — change both together.
+        set_fcn("autocomplete_multi_models_test_settings", 1)
+        assert get_fcn("autocomplete_multi_models_test_settings", 3) == 1
+        set_fcn("autocomplete_multi_models_test_settings", 2)
+        assert get_fcn("autocomplete_multi_models_test_settings", 3) == 2
+
+        # Replace the no-op defaults with the persistent implementations.
+        business_process.get_setting_from_storage = get_fcn
+        business_process.set_setting_in_storage = set_fcn
+
+
+# def get_min_length():
+#     return
+#
+#
+# def get_min_similarity():
+#     return
+#
+#
+# def get_limit():
+#     return
+#
+#
+# def get_get_key_in_persistent_setting_storage():
+#     def fcn(key):
+#         return key == REBUILD_NEEDED
+#
+#     return fcn
+#
+#
+# def get_set_key_in_persistent_setting_storage():
+#     def fcn(key, value):
+#         pass
+#
+#     return fcn