git.openstreetmap.org Git - nominatim.git/commitdiff
Merge pull request #3375 from matkoniecz/patch-1
authorSarah Hoffmann <lonvia@denofr.de>
Wed, 27 Mar 2024 09:03:45 +0000 (10:03 +0100)
committerGitHub <noreply@github.com>
Wed, 27 Mar 2024 09:03:45 +0000 (10:03 +0100)
add missing space in taginfo listing

nominatim/api/search/db_search_builder.py
nominatim/api/search/geocoder.py

index 97e7ac0282a79b40e7015bcd1069c8edbadeb09e..e27a24d61eb54f0d7bb1bc04abedc90703dacc8a 100644 (file)
@@ -166,7 +166,7 @@ class SearchBuilder:
         sdata.lookups = [dbf.FieldLookup('name_vector', [t.token for t in hnrs], lookups.LookupAny)]
         expected_count = sum(t.count for t in hnrs)
 
-        partials = {t.token: t.count for trange in address
+        partials = {t.token: t.addr_count for trange in address
                        for t in self.query.get_partials_list(trange)}
 
         if expected_count < 8000:
@@ -222,6 +222,7 @@ class SearchBuilder:
             yield penalty, exp_count, dbf.lookup_by_names(list(name_partials.keys()), addr_tokens)
             return
 
+        addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 30000
         # Partial term too frequent. Try looking up by rare full names first.
         name_fulls = self.query.get_tokens(name, TokenType.WORD)
         if name_fulls:
@@ -231,14 +232,16 @@ class SearchBuilder:
             if partials_indexed:
                 penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
 
-            yield penalty,fulls_count / (2**len(addr_tokens)), \
-                  self.get_full_name_ranking(name_fulls, addr_partials,
-                                             fulls_count > 30000 / max(1, len(addr_tokens)))
+            if fulls_count < 50000 or addr_count < 30000:
+                yield penalty,fulls_count / (2**len(addr_tokens)), \
+                    self.get_full_name_ranking(name_fulls, addr_partials,
+                                               fulls_count > 30000 / max(1, len(addr_tokens)))
 
         # To catch remaining results, lookup by name and address
         # We only do this if there is a reasonable number of results expected.
         exp_count = exp_count / (2**len(addr_tokens)) if addr_tokens else exp_count
-        if exp_count < 10000 and all(t.is_indexed for t in name_partials.values()):
+        if exp_count < 10000 and addr_count < 20000\
+           and all(t.is_indexed for t in name_partials.values()):
             penalty += 0.35 * max(1 if name_fulls else 0.1,
                                   5 - len(name_partials) - len(addr_tokens))
             yield penalty, exp_count,\
index 711f83833f9408ff980c29f5eeca046b8baa28d6..775606aab755d6f68180e696828115c285a99f11 100644 (file)
@@ -95,7 +95,7 @@ class ForwardGeocoder:
                     prevresult.accuracy = min(prevresult.accuracy, result.accuracy)
                 else:
                     results[rhash] = result
-                min_ranking = min(min_ranking, result.accuracy * 1.2)
+                min_ranking = min(min_ranking, result.accuracy * 1.2, 2.0)
             log().result_dump('Results', ((r.accuracy, r) for r in lookup_results))
             prev_penalty = search.penalty
             if dt.datetime.now() >= end_time: