git.openstreetmap.org Git - nominatim.git/commitdiff
avoid lookup via partials on frequent words
author Sarah Hoffmann <lonvia@denofr.de>
Wed, 5 Jul 2023 12:07:11 +0000 (14:07 +0200)
committer Sarah Hoffmann <lonvia@denofr.de>
Thu, 6 Jul 2023 10:16:57 +0000 (12:16 +0200)
Drops expensive searches via partials on terms like 'rue de'.

See #2979.
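The guard added below can be read in isolation: the fallback lookup over name and address is only generated when the cheaper of the two expected index scans stays below a fixed cutoff. A minimal standalone sketch of that predicate (the names exp_name_count, exp_addr_count and the 10000 cutoff are taken from the diff; everything else is simplified for illustration):

    def should_emit_fallback_lookup(exp_name_count: int, exp_addr_count: int) -> bool:
        # Skip the expensive name+address lookup when even the cheaper of
        # the two index scans is expected to return 10000 or more rows.
        return min(exp_name_count, exp_addr_count) < 10000

    assert should_emit_fallback_lookup(9999, 9999)        # still emitted
    assert not should_emit_fallback_lookup(10000, 10000)  # dropped as too expensive

For a query like 'rue de', every token is a frequent partial, both expected counts are huge, and the expensive search is skipped entirely.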

nominatim/api/search/db_search_builder.py
test/python/api/search/test_db_search_builder.py

nominatim/api/search/db_search_builder.py
index 67db32479fc6aa8ad6dc87fd0e8d80897cf5d1c8..2a3153be334d17ed3262853cd156172febb0f8b3 100644 (file)
@@ -235,19 +235,21 @@ class SearchBuilder:
             yield penalty, sum(t.count for t in rare_names), lookup
 
         # To catch remaining results, lookup by name and address
-        if all(t.is_indexed for t in name_partials):
-            lookup = [dbf.FieldLookup('name_vector',
-                                      [t.token for t in name_partials], 'lookup_all')]
-        else:
-            # we don't have the partials, try with the non-rare names
-            non_rare_names = [t.token for t in name_fulls if t.count >= 1000]
-            if not non_rare_names:
-                return
-            lookup = [dbf.FieldLookup('name_vector', non_rare_names, 'lookup_any')]
-        if addr_tokens:
-            lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'lookup_all'))
-        yield penalty + 0.1 * max(0, 5 - len(name_partials) - len(addr_tokens)),\
-              min(exp_name_count, exp_addr_count), lookup
+        # We only do this if there is a reasonable number of results expected.
+        if min(exp_name_count, exp_addr_count) < 10000:
+            if all(t.is_indexed for t in name_partials):
+                lookup = [dbf.FieldLookup('name_vector',
+                                          [t.token for t in name_partials], 'lookup_all')]
+            else:
+                # we don't have the partials, try with the non-rare names
+                non_rare_names = [t.token for t in name_fulls if t.count >= 1000]
+                if not non_rare_names:
+                    return
+                lookup = [dbf.FieldLookup('name_vector', non_rare_names, 'lookup_any')]
+            if addr_tokens:
+                lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'lookup_all'))
+            yield penalty + 0.1 * max(0, 5 - len(name_partials) - len(addr_tokens)),\
+                  min(exp_name_count, exp_addr_count), lookup
 
 
     def get_name_ranking(self, trange: TokenRange) -> dbf.FieldRanking:
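Note that the penalty expression carried unchanged into the guarded branch rewards longer queries: every name partial or address token below a total of five reduces the surcharge by 0.1. A quick worked example with invented values (base penalty and token counts are hypothetical, chosen only to show the arithmetic):

    penalty = 0.2                               # hypothetical base penalty
    name_partials_len, addr_tokens_len = 2, 1   # e.g. 'rue', 'de' plus one address token
    surcharge = 0.1 * max(0, 5 - name_partials_len - addr_tokens_len)  # 0.1 * 2 = 0.2
    print(penalty + surcharge)                  # 0.4 -- shorter queries rank worse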
test/python/api/search/test_db_search_builder.py
index 9631850e1b284fcf760c3171b5cf2c09a78bed64..63589ffc02cd546145734efffffb357132e1e541 100644 (file)
@@ -382,7 +382,7 @@ def test_frequent_partials_in_name_but_not_in_address():
 
 
 def test_frequent_partials_in_name_and_address():
-    searches = make_counted_searches(10000, 1, 10000, 1)
+    searches = make_counted_searches(9999, 1, 9999, 1)
 
     assert len(searches) == 2
 
@@ -393,3 +393,15 @@ def test_frequent_partials_in_name_and_address():
             {('name_vector', 'lookup_any'), ('nameaddress_vector', 'restrict')}
     assert set((l.column, l.lookup_type) for l in searches[1].lookups) == \
             {('nameaddress_vector', 'lookup_all'), ('name_vector', 'lookup_all')}
+
+
+def test_too_frequent_partials_in_name_and_address():
+    searches = make_counted_searches(10000, 1, 10000, 1)
+
+    assert len(searches) == 1
+
+    assert all(isinstance(s, dbs.PlaceSearch) for s in searches)
+    searches.sort(key=lambda s: s.penalty)
+
+    assert set((l.column, l.lookup_type) for l in searches[0].lookups) == \
+            {('name_vector', 'lookup_any'), ('nameaddress_vector', 'restrict')}
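For orientation, the lookup shapes asserted above mirror the FieldLookup instances built in db_search_builder.py. A condensed standalone illustration (the token ids are invented placeholders, and the dbf alias is assumed to point at nominatim.api.search.db_search_fields as in the builder module):

    from nominatim.api.search import db_search_fields as dbf

    name_tokens = [11, 12]   # invented placeholder token ids
    addr_tokens = [21]

    # The search that survives above the cutoff: OR over the full names,
    # with address tokens merely restricting the candidate set.
    surviving = [dbf.FieldLookup('name_vector', name_tokens, 'lookup_any'),
                 dbf.FieldLookup('nameaddress_vector', addr_tokens, 'restrict')]

    # The fallback dropped above the cutoff: AND over all name partials and
    # AND over all address tokens, as built in the guarded branch of the diff.
    dropped = [dbf.FieldLookup('name_vector', name_tokens, 'lookup_all'),
               dbf.FieldLookup('nameaddress_vector', addr_tokens, 'lookup_all')]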