]> git.openstreetmap.org Git - nominatim.git/commitdiff
add new command for cleaning word tokens
authorSarah Hoffmann <lonvia@denofr.de>
Thu, 20 Jan 2022 19:05:15 +0000 (20:05 +0100)
committerSarah Hoffmann <lonvia@denofr.de>
Thu, 20 Jan 2022 19:05:15 +0000 (20:05 +0100)
For the moment this only identifies and logs outdated housenumber tokens; they are not yet deleted.

nominatim/clicmd/refresh.py
nominatim/tokenizer/base.py
nominatim/tokenizer/icu_tokenizer.py
nominatim/tokenizer/legacy_tokenizer.py

index 4df283f8c781c8a7ecc41ef58d6ced7601bb2557..c741dcf63632fc0c01d8592a66f46d3be0c8bdbd 100644 (file)
@@ -39,6 +39,8 @@ class UpdateRefresh:
         group = parser.add_argument_group('Data arguments')
         group.add_argument('--postcodes', action='store_true',
                            help='Update postcode centroid table')
+        group.add_argument('--word-tokens', action='store_true',
+                           help='Clean up search terms')
         group.add_argument('--word-counts', action='store_true',
                            help='Compute frequency of full-word search terms')
         group.add_argument('--address-levels', action='store_true',
@@ -76,6 +78,10 @@ class UpdateRefresh:
                 LOG.error("The place table doesn't exist. "
                           "Postcode updates on a frozen database is not possible.")
 
+        if args.word_tokens:
+            tokenizer = self._get_tokenizer(args.config)
+            tokenizer.update_word_tokens()
+
         if args.word_counts:
             LOG.warning('Recompute word statistics')
             self._get_tokenizer(args.config).update_statistics()
index 980dc69ea00c290a46ed59286b9a72d2a4588df4..f81b3bc262ed9f690b0eb6bc794e2cab076de822 100644 (file)
@@ -209,6 +209,13 @@ class AbstractTokenizer(ABC):
         """
 
 
+    @abstractmethod
+    def update_word_tokens(self) -> None:
+        """ Do house-keeping on the tokenizer's internal data structures.
+            Remove unused word tokens, resort data etc.
+        """
+
+
     @abstractmethod
     def name_analyzer(self) -> AbstractAnalyzer:
         """ Create a new analyzer for tokenizing names and queries
index cfbb44e3d356c85a9317657fb7df38dd08b3539a..da07897bd3e28a4ee638c92b01bb43141c7d4c5b 100644 (file)
@@ -112,6 +112,39 @@ class LegacyICUTokenizer(AbstractTokenizer):
             conn.commit()
 
 
+    def _cleanup_housenumbers(self):
+        """ Find house-number tokens no longer referenced by search_name and log their count.
+        """
+        with connect(self.dsn) as conn:
+            with conn.cursor(name="hnr_counter") as cur:
+                cur.execute("""SELECT word_id, word_token FROM word
+                               WHERE type = 'H'
+                                 AND NOT EXISTS(SELECT * FROM search_name
+                                                WHERE ARRAY[word.word_id] && name_vector)
+                                 AND (char_length(word_token) > 6
+                                      OR word_token not similar to '\d+')
+                            """)
+                candidates = {token: wid for wid, token in cur}
+            with conn.cursor(name="hnr_counter") as cur:
+                cur.execute("""SELECT housenumber FROM placex
+                               WHERE housenumber is not null
+                                     AND (char_length(housenumber) > 6
+                                          OR housenumber not similar to '\d+')
+                            """)
+                for row in cur:
+                    for hnr in row[0].split(';'):
+                        candidates.pop(hnr, None)
+        LOG.info("There are %s outdated housenumbers.", len(candidates))
+
+
+    def update_word_tokens(self):
+        """ Remove unused tokens (currently only scans house-number tokens).
+        """
+        LOG.info("Cleaning up housenumber tokens.")
+        self._cleanup_housenumbers()
+        LOG.info("Tokenizer house-keeping done.")
+
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should
index 551b0536b88dbe77012a364015005a60cbe19548..7ce6b24250f4d303ed229216fa29ba92dd6dd095 100644 (file)
@@ -211,6 +211,13 @@ class LegacyTokenizer(AbstractTokenizer):
                     cur.drop_table("word_frequencies")
             conn.commit()
 
+
+    def update_word_tokens(self):
+        """ No house-keeping implemented for the legacy tokenizer (interface stub).
+        """
+        LOG.info("No tokenizer clean-up available.")
+
+
     def name_analyzer(self):
         """ Create a new analyzer for tokenizing names and queries
             using this tokinzer. Analyzers are context managers and should