git.openstreetmap.org Git - nominatim.git/commitdiff
make word recount a tokenizer-specific function
author: Sarah Hoffmann <lonvia@denofr.de>
Tue, 19 Oct 2021 09:21:16 +0000 (11:21 +0200)
committer: Sarah Hoffmann <lonvia@denofr.de>
Tue, 19 Oct 2021 09:21:16 +0000 (11:21 +0200)
lib-sql/words_from_search_name.sql [deleted file]
nominatim/clicmd/refresh.py
nominatim/tokenizer/base.py
nominatim/tokenizer/icu_tokenizer.py
nominatim/tokenizer/legacy_tokenizer.py
nominatim/tools/refresh.py

diff --git a/lib-sql/words_from_search_name.sql b/lib-sql/words_from_search_name.sql
deleted file mode 100644 (file)
index b7727dc..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-DROP TABLE IF EXISTS word_frequencies;
-CREATE TABLE word_frequencies AS
- SELECT unnest(name_vector) as id, count(*) FROM search_name GROUP BY id;
-
-CREATE INDEX idx_word_frequencies ON word_frequencies(id);
-
-UPDATE word SET search_name_count = count
-  FROM word_frequencies
- WHERE word_token like ' %' and word_id = id;
-
-DROP TABLE word_frequencies;
index aa540f6b253e7fada9500767ba01a3d6383093e3..e7d7d7ba1d33cc032b3c0260a5ffc3d05f772344 100644 (file)
@@ -71,8 +71,8 @@ class UpdateRefresh:
                           "Postcode updates on a frozen database is not possible.")
 
         if args.word_counts:
-            LOG.warning('Recompute frequency of full-word search terms')
-            refresh.recompute_word_counts(args.config.get_libpq_dsn(), args.sqllib_dir)
+            LOG.warning('Recompute word statistics')
+            self._get_tokenizer(args.config).update_statistics()
 
         if args.address_levels:
             cfg = Path(args.config.ADDRESS_LEVEL_CONFIG)
index 02bc312f18dc5e0bbe65fcacfc7e3564f805d441..94fac1fc4e1cdcab553af27f6fe1a94b21a9f95f 100644 (file)
@@ -205,6 +205,16 @@ class AbstractTokenizer(ABC):
         pass
 
 
+    @abstractmethod
+    def update_statistics(self) -> None:
+        """ Recompute any tokenizer statistics necessary for efficient lookup.
+            This function is meant to be called from time to time by the user
+            to improve performance. However, the tokenizer must not depend on
+            it to be called in order to work.
+        """
+        pass
+
+
     @abstractmethod
     def name_analyzer(self) -> AbstractAnalyzer:
         """ Create a new analyzer for tokenizing names and queries
index 12d1eccd15f1799b6b45af4df6b0b39ec6a93674..686fbd7939ee70b10a5a7557cec334ba0e324733 100644 (file)
@@ -93,6 +93,25 @@ class LegacyICUTokenizer(AbstractTokenizer):
         return None
 
 
+    def update_statistics(self):
+        """ Recompute frequencies for all name words.
+        """
+        with connect(self.dsn) as conn:
+            with conn.cursor() as cur:
+                cur.drop_table("word_frequencies")
+                LOG.info("Computing word frequencies")
+                cur.execute("""CREATE TEMP TABLE word_frequencies AS
+                                 SELECT unnest(name_vector) as id, count(*)
+                                 FROM search_name GROUP BY id""")
+                cur.execute("CREATE INDEX ON word_frequencies(id)")
+                LOG.info("Update word table with recomputed frequencies")
+                cur.execute("""UPDATE word
+                               SET info = info || jsonb_build_object('count', count)
+                               FROM word_frequencies WHERE word_id = id""")
+                cur.drop_table("word_frequencies")
+            conn.commit()
+
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should
index c935f20d4a9836e0f1c97ab74a5ce93a98b99ba1..d901a68d2e53f77e5c96210c11ede863e7e5e36f 100644 (file)
@@ -186,6 +186,24 @@ class LegacyTokenizer(AbstractTokenizer):
             self._save_config(conn, config)
 
 
+    def update_statistics(self):
+        """ Recompute the frequency of full words.
+        """
+        with connect(self.dsn) as conn:
+            with conn.cursor() as cur:
+                cur.drop_table("word_frequencies")
+                LOG.info("Computing word frequencies")
+                cur.execute("""CREATE TEMP TABLE word_frequencies AS
+                                 SELECT unnest(name_vector) as id, count(*)
+                                 FROM search_name GROUP BY id""")
+                cur.execute("CREATE INDEX ON word_frequencies(id)")
+                LOG.info("Update word table with recomputed frequencies")
+                cur.execute("""UPDATE word SET search_name_count = count
+                               FROM word_frequencies
+                               WHERE word_token like ' %' and word_id = id""")
+                cur.drop_table("word_frequencies")
+            conn.commit()
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should
index 5aaee0c8d1d8417a5a88c4b7a317a2d2f37c4467..00ae5dc95bb4c3c5fa0205e74ab49abf61b51e13 100644 (file)
@@ -14,12 +14,6 @@ from nominatim.version import NOMINATIM_VERSION
 LOG = logging.getLogger()
 
 
-def recompute_word_counts(dsn, sql_dir):
-    """ Compute the frequency of full-word search terms.
-    """
-    execute_file(dsn, sql_dir / 'words_from_search_name.sql')
-
-
 def _add_address_level_rows_from_entry(rows, entry):
     """ Converts a single entry from the JSON format for address rank
         descriptions into a flat format suitable for inserting into a