"""
Tokenizer implementing normalisation as used before Nominatim 4.
"""
from collections import OrderedDict
import logging
import re
import shutil

import psycopg2
import psycopg2.extras

from nominatim.db.connection import connect
from nominatim.db import properties
from nominatim.db import utils as db_utils
from nominatim.db.sql_preprocessor import SQLPreprocessor
from nominatim.errors import UsageError

DBCFG_NORMALIZATION = "tokenizer_normalization"
DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq"

LOG = logging.getLogger()


def create(dsn, data_dir):
    """ Create a new instance of the tokenizer provided by this module.
    """
    return LegacyTokenizer(dsn, data_dir)
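
# A minimal usage sketch (assumptions: a libpq DSN pointing at a Nominatim
# database and a data directory below the project directory; `config` is the
# usual Nominatim configuration object):
#
#   tokenizer = create('dbname=nominatim', project_dir / 'tokenizer')
#   tokenizer.init_new_db(config)      # for a freshly imported database
#   # ... or, for a database that was set up earlier:
#   tokenizer.init_from_project()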


def _install_module(config_module_path, src_dir, module_dir):
    """ Copies the PostgreSQL normalisation module into the project
        directory if necessary. For historical reasons the module is
        saved in the '/module' subdirectory and not with the other tokenizer
        data.

        The function detects when the installation is run from the
        build directory. It doesn't touch the module in that case.
    """
    # Custom module locations are simply used as is.
    if config_module_path:
        LOG.info("Using custom path for database module at '%s'", config_module_path)
        return config_module_path

    # Compatibility mode for builddir installations.
    if module_dir.exists() and src_dir.samefile(module_dir):
        LOG.info('Running from build directory. Leaving database module as is.')
        return module_dir

    # In any other case install the module in the project directory.
    if not module_dir.exists():
        module_dir.mkdir()

    destfile = module_dir / 'nominatim.so'
    shutil.copy(str(src_dir / 'nominatim.so'), str(destfile))
    destfile.chmod(0o755)

    LOG.info('Database module installed at %s', str(destfile))

    return module_dir


def _check_module(module_dir, conn):
    """ Try to use the PostgreSQL module to confirm that it is correctly
        installed and accessible from PostgreSQL.
    """
    with conn.cursor() as cur:
        try:
            cur.execute("""CREATE FUNCTION nominatim_test_import_func(text)
                           RETURNS text AS '{}/nominatim.so', 'transliteration'
                           LANGUAGE c IMMUTABLE STRICT;
                           DROP FUNCTION nominatim_test_import_func(text)
                        """.format(module_dir))
        except psycopg2.DatabaseError as err:
            LOG.fatal("Error accessing database module: %s", err)
            raise UsageError("Database module cannot be accessed.") from err


class LegacyTokenizer:
    """ The legacy tokenizer uses a special PostgreSQL module to normalize
        names and queries. The tokenizer thus implements normalization through
        calls to the database.
    """

    def __init__(self, dsn, data_dir):
        self.dsn = dsn
        self.data_dir = data_dir
        self.normalization = None


    def init_new_db(self, config):
        """ Set up a new tokenizer for the database.

            This copies all necessary data in the project directory to make
            sure the tokenizer remains stable even over updates.
        """
        module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                     config.lib_dir.module,
                                     config.project_dir / 'module')

        self.normalization = config.TERM_NORMALIZATION

        with connect(self.dsn) as conn:
            _check_module(module_dir, conn)
            self._save_config(conn, config)
            conn.commit()

        self.update_sql_functions(config)
        self._init_db_tables(config)


    def init_from_project(self):
        """ Initialise the tokenizer from the project directory.
        """
        with connect(self.dsn) as conn:
            self.normalization = properties.get_property(conn, DBCFG_NORMALIZATION)


    def update_sql_functions(self, config):
        """ Reimport the SQL functions for this tokenizer.
        """
        with connect(self.dsn) as conn:
            max_word_freq = properties.get_property(conn, DBCFG_MAXWORDFREQ)
            modulepath = config.DATABASE_MODULE_PATH or \
                         str((config.project_dir / 'module').resolve())
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer.sql',
                              max_word_freq=max_word_freq,
                              modulepath=modulepath)


    def migrate_database(self, config):
        """ Initialise the project directory of an existing database for
            use with this tokenizer.

            This is a special migration function for updating existing
            databases to new software versions.
        """
        module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                     config.lib_dir.module,
                                     config.project_dir / 'module')

        with connect(self.dsn) as conn:
            _check_module(module_dir, conn)
            self._save_config(conn, config)


    def name_analyzer(self):
        """ Create a new analyzer for tokenizing names and queries
            using this tokenizer. Analyzers are context managers and should
            be used accordingly:

            ```
            with tokenizer.name_analyzer() as analyzer:
                analyzer.process_place(place)
            ```

            When used outside the with construct, the caller must ensure to
            call the close() function before destructing the analyzer.

            Analyzers are not thread-safe. You need to instantiate one per thread.
        """
        return LegacyNameAnalyzer(self.dsn)


    def _init_db_tables(self, config):
        """ Set up the word table and fill it with pre-computed word
            frequencies.
        """
        with connect(self.dsn) as conn:
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_tables.sql')
            conn.commit()

        LOG.warning("Precomputing word tokens")
        db_utils.execute_file(self.dsn, config.lib_dir.data / 'words.sql')


    def _save_config(self, conn, config):
        """ Save the configuration that needs to remain stable for the given
            database as database properties.
        """
        properties.set_property(conn, DBCFG_NORMALIZATION, self.normalization)
        properties.set_property(conn, DBCFG_MAXWORDFREQ, config.MAX_WORD_FREQUENCY)


class LegacyNameAnalyzer:
    """ The legacy analyzer uses the special PostgreSQL module for
        splitting names.

        Each instance opens a connection to the database to request the
        normalization.
    """

    def __init__(self, dsn):
        self.conn = connect(dsn).connection
        self.conn.autocommit = True
        psycopg2.extras.register_hstore(self.conn)

        self._cache = _TokenCache(self.conn)


    def __enter__(self):
        return self


    def __exit__(self, exc_type, exc_value, traceback):
        self.close()


    def close(self):
        """ Free all resources used by the analyzer.
        """
        if self.conn:
            self.conn.close()
            self.conn = None


    def add_postcodes_from_db(self):
        """ Add postcodes from the location_postcode table to the word table.
        """
        with self.conn.cursor() as cur:
            cur.execute("""SELECT count(create_postcode_id(pc))
                           FROM (SELECT distinct(postcode) as pc
                                 FROM location_postcode) x""")


    def process_place(self, place):
        """ Determine tokenizer information about the given place.

            Returns a JSON-serialisable structure that will be handed into
            the database via the token_info field.
        """
        token_info = _TokenInfo(self._cache)

        token_info.add_names(self.conn, place.get('name'), place.get('country_feature'))

        address = place.get('address')

        if address:
            hnrs = []
            addr_terms = []
            for key, value in address.items():
                if key == 'postcode':
                    self._add_postcode(value)
                elif key in ('housenumber', 'streetnumber', 'conscriptionnumber'):
                    hnrs.append(value)
                elif key == 'street':
                    token_info.add_street(self.conn, value)
                elif key == 'place':
                    token_info.add_place(self.conn, value)
                elif not key.startswith('_') and \
                     key not in ('country', 'full'):
                    addr_terms.append((key, value))

            if hnrs:
                token_info.add_housenumbers(self.conn, hnrs)

            if addr_terms:
                token_info.add_address_terms(self.conn, addr_terms)

        return token_info.data
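
    # Sketch of a process_place() call (assumption: `name` and `address` are
    # the hstore-style dicts produced by the import; the actual token ids
    # depend on the database content):
    #
    #   info = analyzer.process_place({'name': {'name': 'Main Street'},
    #                                  'address': {'postcode': '12345',
    #                                              'housenumber': '4;6'}})
    #   # info now maps keys such as 'names', 'hnr' and 'hnr_tokens' to the
    #   # token data handed to the database via the token_info field.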


    def _add_postcode(self, postcode):
        """ Make sure the normalized postcode is present in the word table.
        """
        def _create_postcode_from_db(pcode):
            with self.conn.cursor() as cur:
                cur.execute('SELECT create_postcode_id(%s)', (pcode, ))

        if re.search(r'[:,;]', postcode) is None:
            self._cache.postcodes.get(postcode.strip().upper(), _create_postcode_from_db)
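
    # Note: the regex guard above deliberately skips composite values such as
    # '12345;67890' (several postcodes on one object); only plain postcodes
    # are normalised and added to the word table.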
275 """ Collect token information to be sent back to the database.
277 def __init__(self, cache):


    def add_names(self, conn, names, country_feature):
        """ Add token information for the names of the place.
        """
        if not names:
            return

        with conn.cursor() as cur:
            # Create the token IDs for all names.
            self.data['names'] = cur.scalar("SELECT make_keywords(%s)::text",
                                            (names, ))

            # Add country tokens to word table if necessary.
            if country_feature and re.fullmatch(r'[A-Za-z][A-Za-z]', country_feature):
                cur.execute("SELECT create_country(%s, %s)",
                            (names, country_feature.lower()))


    def add_housenumbers(self, conn, hnrs):
        """ Extract housenumber information from the address.
        """
        if len(hnrs) == 1:
            token = self.cache.get_housenumber(hnrs[0])
            if token is not None:
                self.data['hnr_tokens'] = token
                self.data['hnr'] = hnrs[0]
                return

        # split numbers if necessary
        simple_list = []
        for hnr in hnrs:
            simple_list.extend((x.strip() for x in re.split(r'[;,]', hnr)))

        if len(simple_list) > 1:
            simple_list = list(set(simple_list))

        with conn.cursor() as cur:
            cur.execute("SELECT (create_housenumbers(%s)).* ", (simple_list, ))
            self.data['hnr_tokens'], self.data['hnr'] = cur.fetchone()
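
    # Splitting sketch: hnrs=['1;2b', '3'] yields the flattened list
    # ['1', '2b', '3'] (deduplicated, order not guaranteed) before being
    # handed to the create_housenumbers() database function.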


    def add_street(self, conn, street):
        """ Add addr:street match terms.
        """
        def _get_street(name):
            with conn.cursor() as cur:
                return cur.scalar("SELECT word_ids_from_name(%s)::text", (name, ))

        self.data['street'] = self.cache.streets.get(street, _get_street)


    def add_place(self, conn, place):
        """ Add addr:place search and match terms.
        """
        def _get_place(name):
            with conn.cursor() as cur:
                cur.execute("""SELECT (addr_ids_from_name(%s)
                               || getorcreate_name_id(make_standard_name(%s), ''))::text,
                               word_ids_from_name(%s)::text""",
                            (name, name, name))
                return cur.fetchone()

        self.data['place_search'], self.data['place_match'] = \
            self.cache.places.get(place, _get_place)


    def add_address_terms(self, conn, terms):
        """ Add additional address terms.
        """
        def _get_address_term(name):
            with conn.cursor() as cur:
                cur.execute("""SELECT addr_ids_from_name(%s)::text,
                               word_ids_from_name(%s)::text""",
                            (name, name))
                return cur.fetchone()

        tokens = {}
        for key, value in terms:
            tokens[key] = self.cache.address_terms.get(value, _get_address_term)

        self.data['addr'] = tokens
364 """ Least recently used cache that accepts a generator function to
365 produce the item when there is a cache miss.
368 def __init__(self, maxsize=128, init_data=None):
369 self.data = init_data or OrderedDict()
370 self.maxsize = maxsize
371 if init_data is not None and len(init_data) > maxsize:
372 self.maxsize = len(init_data)


    def get(self, key, generator):
        """ Get the item with the given key from the cache. If nothing
            is found in the cache, generate the value through the
            generator function and store it in the cache.
        """
        value = self.data.get(key)
        if value is not None:
            self.data.move_to_end(key)
        else:
            value = generator(key)
            if len(self.data) >= self.maxsize:
                self.data.popitem(last=False)
            self.data[key] = value

        return value
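
    # Usage sketch (str.upper stands in for a real generator function):
    #
    #   cache = _LRU(maxsize=2)
    #   cache.get('a', str.upper)   # miss: calls str.upper('a'), caches 'A'
    #   cache.get('a', str.upper)   # hit: returns 'A', refreshes LRU order
    #
    # Once more than `maxsize` keys have been inserted, the least recently
    # used entry is evicted via popitem(last=False).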
392 """ Cache for token information to avoid repeated database queries.
394 This cache is not thread-safe and needs to be instantiated per
397 def __init__(self, conn):
399 self.streets = _LRU(maxsize=256)
400 self.places = _LRU(maxsize=128)
401 self.address_terms = _LRU(maxsize=1024)
403 # Lookup houseunumbers up to 100 and cache them
404 with conn.cursor() as cur:
405 cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text
406 FROM generate_series(1, 100) as i""")
407 self._cached_housenumbers = {str(r[0]) : r[1] for r in cur}
409 # Get postcodes that are already saved
410 postcodes = OrderedDict()
411 with conn.cursor() as cur:
412 cur.execute("""SELECT word FROM word
413 WHERE class ='place' and type = 'postcode'""")
415 postcodes[row[0]] = None
416 self.postcodes = _LRU(maxsize=32, init_data=postcodes)


    def get_housenumber(self, number):
        """ Get a housenumber token from the cache.
        """
        return self._cached_housenumbers.get(number)
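
    # Lookup sketch: tokens for the numbers 1 to 100 are precomputed in
    # __init__, so get_housenumber('42') returns the cached token string,
    # while get_housenumber('1a') returns None and the caller falls back
    # to a database query.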