"""
Tokenizer implementing normalisation as used before Nominatim 4.
"""
from collections import OrderedDict
import logging
import re
import shutil

import psycopg2
import psycopg2.extras

from nominatim.db.connection import connect
from nominatim.db import properties
from nominatim.db import utils as db_utils
from nominatim.db.sql_preprocessor import SQLPreprocessor
from nominatim.errors import UsageError

DBCFG_NORMALIZATION = "tokenizer_normalization"
DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq"

LOG = logging.getLogger()


def create(dsn, data_dir):
    """ Create a new instance of the tokenizer provided by this module.
    """
    return LegacyTokenizer(dsn, data_dir)
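
# Illustrative usage sketch only (assumption: a Nominatim configuration
# object `config` providing the attributes referenced by this module,
# e.g. DATABASE_MODULE_PATH, and a `project_dir` Path):
#
#   tokenizer = create('dbname=nominatim', project_dir / 'tokenizer')
#   tokenizer.init_new_db(config)       # on a fresh import
#   tokenizer.init_from_project()       # on an already set-up database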


def _install_module(config_module_path, src_dir, module_dir):
    """ Copies the PostgreSQL normalisation module into the project
        directory if necessary. For historical reasons the module is
        saved in the '/module' subdirectory and not with the other tokenizer
        data.

        The function detects when the installation is run from the
        build directory. It doesn't touch the module in that case.
    """
    # Custom module locations are simply used as is.
    if config_module_path:
        LOG.info("Using custom path for database module at '%s'", config_module_path)
        return config_module_path

    # Compatibility mode for builddir installations.
    if module_dir.exists() and src_dir.samefile(module_dir):
        LOG.info('Running from build directory. Leaving database module as is.')
        return module_dir

    # In any other case install the module in the project directory.
    if not module_dir.exists():
        module_dir.mkdir()

    destfile = module_dir / 'nominatim.so'
    shutil.copy(str(src_dir / 'nominatim.so'), str(destfile))
    # The module file must be readable by the PostgreSQL server process.
    destfile.chmod(0o755)

    LOG.info('Database module installed at %s', str(destfile))

    return module_dir


def _check_module(module_dir, conn):
    """ Try to use the PostgreSQL module to confirm that it is correctly
        installed and accessible from PostgreSQL.
    """
    with conn.cursor() as cur:
        try:
            cur.execute("""CREATE FUNCTION nominatim_test_import_func(text)
                           RETURNS text AS '{}/nominatim.so', 'transliteration'
                           LANGUAGE c IMMUTABLE STRICT;
                           DROP FUNCTION nominatim_test_import_func(text)
                        """.format(module_dir))
        except psycopg2.DatabaseError as err:
            LOG.fatal("Error accessing database module: %s", err)
            raise UsageError("Database module cannot be accessed.") from err


class LegacyTokenizer:
    """ The legacy tokenizer uses a special PostgreSQL module to normalize
        names and queries. The tokenizer thus implements normalization through
        calls to the database.
    """

    def __init__(self, dsn, data_dir):
        self.dsn = dsn
        self.data_dir = data_dir
        self.normalization = None

    def init_new_db(self, config):
        """ Set up a new tokenizer for the database.

            This copies all necessary data in the project directory to make
            sure the tokenizer remains stable even over updates.
        """
        module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                     config.lib_dir.module,
                                     config.project_dir / 'module')

        self.normalization = config.TERM_NORMALIZATION

        with connect(self.dsn) as conn:
            _check_module(module_dir, conn)
            self._save_config(conn, config)
            conn.commit()

        self.update_sql_functions(config)
        self._init_db_tables(config)

    def init_from_project(self):
        """ Initialise the tokenizer from the project directory.
        """
        with connect(self.dsn) as conn:
            self.normalization = properties.get_property(conn, DBCFG_NORMALIZATION)

    def update_sql_functions(self, config):
        """ Reimport the SQL functions for this tokenizer.
        """
        with connect(self.dsn) as conn:
            max_word_freq = properties.get_property(conn, DBCFG_MAXWORDFREQ)
            modulepath = config.DATABASE_MODULE_PATH or \
                         str((config.project_dir / 'module').resolve())
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer.sql',
                              max_word_freq=max_word_freq,
                              modulepath=modulepath)

    def migrate_database(self, config):
        """ Initialise the project directory of an existing database for
            use with this tokenizer.

            This is a special migration function for updating existing databases
            to new software versions.
        """
        module_dir = _install_module(config.DATABASE_MODULE_PATH,
                                     config.lib_dir.module,
                                     config.project_dir / 'module')

        with connect(self.dsn) as conn:
            _check_module(module_dir, conn)
            self._save_config(conn, config)

    def name_analyzer(self):
        """ Create a new analyzer for tokenizing names and queries
            using this tokenizer. Analyzers are context managers and should
            be used accordingly:

                with tokenizer.name_analyzer() as analyzer:
                    analyzer.process_place(place)

            When used outside the with construct, the caller must ensure to
            call the close() function before destructing the analyzer.

            Analyzers are not thread-safe. You need to instantiate one per thread.
        """
        return LegacyNameAnalyzer(self.dsn)

    def _init_db_tables(self, config):
        """ Set up the word table and fill it with pre-computed word
            frequencies.
        """
        with connect(self.dsn) as conn:
            sqlp = SQLPreprocessor(conn, config)
            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_tables.sql')
            conn.commit()

        LOG.warning("Precomputing word tokens")
        db_utils.execute_file(self.dsn, config.lib_dir.data / 'words.sql')

    def _save_config(self, conn, config):
        """ Save the configuration that needs to remain stable for the given
            database as database properties.
        """
        properties.set_property(conn, DBCFG_NORMALIZATION, self.normalization)
        properties.set_property(conn, DBCFG_MAXWORDFREQ, config.MAX_WORD_FREQUENCY)


class LegacyNameAnalyzer:
    """ The legacy analyzer uses the special PostgreSQL module for
        splitting names.

        Each instance opens a connection to the database to request the
        normalization.
    """

    def __init__(self, dsn):
        self.conn = connect(dsn).connection
        self.conn.autocommit = True
        psycopg2.extras.register_hstore(self.conn)

        self._cache = _TokenCache(self.conn)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """ Free all resources used by the analyzer.
        """
        if self.conn:
            self.conn.close()
            self.conn = None

    def add_postcodes_from_db(self):
        """ Add postcodes from the location_postcode table to the word table.
        """
        with self.conn.cursor() as cur:
            cur.execute("""SELECT count(create_postcode_id(pc))
                           FROM (SELECT distinct(postcode) as pc
                                 FROM location_postcode) x""")

    def process_place(self, place):
        """ Determine tokenizer information about the given place.

            Returns a JSON-serialisable structure that will be handed into
            the database via the token_info field.
        """
        token_info = _TokenInfo(self._cache)

        token_info.add_names(self.conn, place.get('name'), place.get('country_feature'))

        address = place.get('address')

        if address:
            self._add_postcode(address.get('postcode'))
            token_info.add_housenumbers(self.conn, address)
            token_info.add_address_parent(self.conn, address.get('street'),
                                          address.get('place'))
            token_info.add_address_parts(self.conn, address)

        return token_info.data
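
    # For orientation (illustrative only, token values below are made up):
    # depending on the available name and address information, the returned
    # dict may contain the keys 'names', 'hnr_tokens', 'hnr', 'street_search',
    # 'street_match', 'place_search', 'place_match' and 'addr', which are
    # filled in by _TokenInfo below, e.g.
    #
    #   {'names': '{1,2,3}', 'hnr_tokens': '{42}', 'hnr': '12',
    #    'addr': {'city': ('{55}', '{56}')}}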

    def _add_postcode(self, postcode):
        """ Make sure the normalized postcode is present in the word table.
        """
        # Skip empty postcodes and ones containing separator characters.
        if not postcode or re.search(r'[:,;]', postcode) is not None:
            return

        def _create_postcode_from_db(pcode):
            with self.conn.cursor() as cur:
                cur.execute('SELECT create_postcode_id(%s)', (pcode, ))

        self._cache.postcodes.get(postcode.strip().upper(), _create_postcode_from_db)
262 """ Collect token information to be sent back to the database.
264 def __init__(self, cache):

    def add_names(self, conn, names, country_feature):
        """ Add token information for the names of the place.
        """
        if not names:
            return

        with conn.cursor() as cur:
            # Create the token IDs for all names.
            self.data['names'] = cur.scalar("SELECT make_keywords(%s)::text",
                                            (names, ))

            # Add country tokens to word table if necessary.
            if country_feature and re.fullmatch(r'[A-Za-z][A-Za-z]', country_feature):
                cur.execute("SELECT create_country(%s, %s)",
                            (names, country_feature.lower()))

    def add_housenumbers(self, conn, address):
        """ Extract housenumber information from the address.
        """
        hnrs = [v for k, v in address.items()
                if k in ('housenumber', 'streetnumber', 'conscriptionnumber')]

        if not hnrs:
            return

        # Fast path: a single housenumber can usually be served from the cache.
        if len(hnrs) == 1:
            token = self.cache.get_housenumber(hnrs[0])
            if token is not None:
                self.data['hnr_tokens'] = token
                self.data['hnr'] = hnrs[0]
                return

        # split numbers if necessary
        simple_list = []
        for hnr in hnrs:
            simple_list.extend((x.strip() for x in re.split(r'[;,]', hnr)))

        if len(simple_list) > 1:
            simple_list = list(set(simple_list))

        with conn.cursor() as cur:
            cur.execute("SELECT (create_housenumbers(%s)).* ", (simple_list, ))
            self.data['hnr_tokens'], self.data['hnr'] = cur.fetchone()
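
    # Illustrative example for the splitting above: an address with
    # {'housenumber': '4;6', 'conscriptionnumber': '1234'} yields
    # hnrs == ['4;6', '1234'] and simple_list == ['4', '6', '1234'],
    # deduplicated before being handed to create_housenumbers().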

    def add_address_parent(self, conn, street, place):
        """ Extract the tokens for street and place terms.
        """
        def _get_streetplace(name):
            with conn.cursor() as cur:
                cur.execute("""SELECT (addr_ids_from_name(%s)
                                       || getorcreate_name_id(make_standard_name(%s), ''))::text,
                                      word_ids_from_name(%s)::text""",
                            (name, name, name))
                return cur.fetchone()

        if street:
            self.data['street_search'], self.data['street_match'] = \
                self.cache.streets.get(street, _get_streetplace)

        if place:
            self.data['place_search'], self.data['place_match'] = \
                self.cache.places.get(place, _get_streetplace)

    def add_address_parts(self, conn, address):
        """ Extract address terms.
        """
        def _get_address_term(name):
            with conn.cursor() as cur:
                cur.execute("""SELECT addr_ids_from_name(%s)::text,
                                      word_ids_from_name(%s)::text""",
                            (name, name))
                return cur.fetchone()

        tokens = {}
        for key, value in address.items():
            if not key.startswith('_') and \
               key not in ('country', 'street', 'place', 'postcode', 'full',
                           'housenumber', 'streetnumber', 'conscriptionnumber'):
                tokens[key] = self.cache.address_terms.get(value, _get_address_term)

        if tokens:
            self.data['addr'] = tokens
356 """ Least recently used cache that accepts a generator function to
357 produce the item when there is a cache miss.
360 def __init__(self, maxsize=128):
361 self.data = OrderedDict()
362 self.maxsize = maxsize
364 def get(self, key, generator):
365 """ Get the item with the given key from the cache. If nothing
366 is found in the cache, generate the value through the
367 generator function and store it in the cache.
369 value = self.data.get(key)
370 if value is not None:
371 self.data.move_to_end(key)
373 value = generator(key)
374 if len(self.data) >= self.maxsize:
375 self.data.popitem(last=False)
376 self.data[key] = value
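
# A minimal sketch of the _LRU semantics (illustrative only): the generator
# is called on a cache miss; once maxsize is reached, the least recently
# used entry is evicted.
#
#   cache = _LRU(maxsize=2)
#   cache.get('12', int)   # miss: stores and returns int('12') == 12
#   cache.get('12', int)   # hit: returns 12 without calling int()
#   cache.get('7', int)
#   cache.get('9', int)    # evicts '12'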
382 """ Cache for token information to avoid repeated database queries.
384 This cache is not thread-safe and needs to be instantiated per
387 def __init__(self, conn):
389 self.postcodes = _LRU(maxsize=32)
390 self.streets = _LRU(maxsize=256)
391 self.places = _LRU(maxsize=128)
392 self.address_terms = _LRU(maxsize=1024)
394 # Lookup houseunumbers up to 100 and cache them
395 with conn.cursor() as cur:
396 cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text
397 FROM generate_series(1, 100) as i""")
398 self._cached_housenumbers = {str(r[0]) : r[1] for r in cur}

    def get_housenumber(self, number):
        """ Get a housenumber token from the cache.
        """
        return self._cached_housenumbers.get(number)
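
    # Illustrative: only the precomputed housenumbers 1..100 are served from
    # this cache; anything else returns None and falls through to the
    # database path in _TokenInfo.add_housenumbers().
    #
    #   cache.get_housenumber('42')   # cached token array as text
    #   cache.get_housenumber('42a')  # None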