"""
Tests for Legacy ICU tokenizer.
"""
import shutil

import pytest
import yaml

from nominatim.tokenizer import legacy_icu_tokenizer
from nominatim.tokenizer.icu_name_processor import ICUNameProcessorRules
from nominatim.tokenizer.icu_rule_loader import ICURuleLoader
from nominatim.db import properties
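

# Test configuration with a temporary project directory and a stripped-down
# SQL directory, so that init_new_db() can run without a full installation.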
@pytest.fixture
def test_config(def_config, tmp_path):
    def_config.project_dir = tmp_path / 'project'
    def_config.project_dir.mkdir()

    sqldir = tmp_path / 'sql'
    sqldir.mkdir()
    (sqldir / 'tokenizer').mkdir()
    (sqldir / 'tokenizer' / 'legacy_icu_tokenizer.sql').write_text("SELECT 'a'")
    shutil.copy(str(def_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_tables.sql'),
                str(sqldir / 'tokenizer' / 'legacy_tokenizer_tables.sql'))

    def_config.lib_dir.sql = sqldir

    return def_config
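

# Returns a factory function, so that tests can create any number of
# tokenizer instances rooted in the same temporary directory.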
@pytest.fixture
def tokenizer_factory(dsn, tmp_path, property_table,
                      sql_preprocessor, place_table, word_table):
    (tmp_path / 'tokenizer').mkdir()

    def _maker():
        return legacy_icu_tokenizer.create(dsn, tmp_path / 'tokenizer')

    return _maker


@pytest.fixture
def db_prop(temp_db_conn):
    def _get_db_property(name):
        return properties.get_property(temp_db_conn, name)

    return _get_db_property
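

# Sets up a freshly initialised database and returns a factory for name
# analyzers whose ICU rules come from an ad-hoc YAML configuration file.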
@pytest.fixture
def analyzer(tokenizer_factory, test_config, monkeypatch,
             temp_db_with_extensions, tmp_path):
    sql = tmp_path / 'sql' / 'tokenizer' / 'legacy_icu_tokenizer.sql'
    sql.write_text("SELECT 'a';")

    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    def _mk_analyser(norm=("[[:Punctuation:][:Space:]]+ > ' '",), trans=(':: upper()',),
                     suffixes=('gasse', ), abbr=('street => st', )):
        cfgfile = tmp_path / 'analyser_test_config.yaml'
        with cfgfile.open('w') as stream:
            cfgstr = {'normalization' : list(norm),
                      'transliteration' : list(trans),
                      'compound_suffixes' : list(suffixes),
                      'abbreviations' : list(abbr)}
            yaml.dump(cfgstr, stream)
        tok.naming_rules = ICUNameProcessorRules(loader=ICURuleLoader(cfgfile))

        return tok.name_analyzer()

    return _mk_analyser
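

# Simplified stand-in for the getorcreate_full_word() function of the real
# database schema. Ids are drawn from seq_word, which makes the tokens
# handed out predictable for the assertions below.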
@pytest.fixture
def getorcreate_full_word(temp_db_cursor):
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_full_word(
                                  norm_term TEXT, lookup_terms TEXT[],
                                  OUT full_token INT,
                                  OUT partial_tokens INT[])
  AS $$
DECLARE
  partial_terms TEXT[] = '{}'::TEXT[];
  term TEXT;
  term_id INTEGER;
  term_count INTEGER;
BEGIN
  -- Reuse an existing full word or create one under a fresh id.
  SELECT min(word_id) INTO full_token
    FROM word WHERE word = norm_term and class is null and country_code is null;
  IF full_token IS NULL THEN
    full_token := nextval('seq_word');
    INSERT INTO word (word_id, word_token, word, search_name_count)
      SELECT full_token, ' ' || lookup_term, norm_term, 0 FROM unnest(lookup_terms) as lookup_term;
  END IF;
  -- Collect the distinct partial terms of all lookup terms.
  FOR term IN SELECT unnest(string_to_array(unnest(lookup_terms), ' ')) LOOP
    IF NOT (ARRAY[term] <@ partial_terms) THEN
      partial_terms := partial_terms || term;
    END IF;
  END LOOP;
  -- Assign a token to each partial term, creating missing ones.
  partial_tokens := '{}'::INT[];
  FOR term IN SELECT unnest(partial_terms) LOOP
    SELECT min(word_id), max(search_name_count) INTO term_id, term_count
      FROM word WHERE word_token = term and class is null and country_code is null;
    IF term_id IS NULL THEN
      term_id := nextval('seq_word');
      INSERT INTO word (word_id, word_token, search_name_count)
        VALUES (term_id, term, 0);
    END IF;
    IF NOT (ARRAY[term_id] <@ partial_tokens) THEN
      partial_tokens := partial_tokens || term_id;
    END IF;
  END LOOP;
END;
$$ LANGUAGE plpgsql""")
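

# House-number tokens are faked with negative ids, so that tests can predict
# the exact token sets, e.g. "{-1}" for the first number handed out.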
@pytest.fixture
def getorcreate_hnr_id(temp_db_cursor):
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_hnr_id(lookup_term TEXT)
                              RETURNS INTEGER AS $$
                                SELECT -nextval('seq_word')::INTEGER; $$ LANGUAGE SQL""")


def test_init_new(tokenizer_factory, test_config, monkeypatch, db_prop):
    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert db_prop(legacy_icu_tokenizer.DBCFG_TERM_NORMALIZATION) == ':: lower();'
    assert db_prop(legacy_icu_tokenizer.DBCFG_MAXWORDFREQ) is not None


def test_init_word_table(tokenizer_factory, test_config, place_row, word_table):
    place_row(names={'name' : 'Test Area', 'ref' : '52'})
    place_row(names={'name' : 'No Area'})
    place_row(names={'name' : 'Holzstrasse'})

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert word_table.get_partial_words() == {('test', 1), ('52', 1),
                                              ('no', 1), ('area', 2),
                                              ('holzstrasse', 1), ('holzstr', 1),
                                              ('holz', 1), ('strasse', 1),
                                              ('str', 1)}


def test_init_from_project(monkeypatch, test_config, tokenizer_factory):
    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
    monkeypatch.setenv('NOMINATIM_MAX_WORD_FREQUENCY', '90300')
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
    # Reset the environment, so that the second tokenizer must read the
    # settings back from the project directory.
    monkeypatch.undo()

    tok = tokenizer_factory()
    tok.init_from_project()

    assert tok.naming_rules is not None
    assert tok.term_normalization == ':: lower();'
    assert tok.max_word_frequency == '90300'


def test_update_sql_functions(db_prop, temp_db_cursor,
                              tokenizer_factory, test_config, table_factory,
                              monkeypatch):
    monkeypatch.setenv('NOMINATIM_MAX_WORD_FREQUENCY', '1133')
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
    monkeypatch.undo()

    assert db_prop(legacy_icu_tokenizer.DBCFG_MAXWORDFREQ) == '1133'

    table_factory('test', 'txt TEXT')

    func_file = test_config.lib_dir.sql / 'tokenizer' / 'legacy_icu_tokenizer.sql'
    func_file.write_text("""INSERT INTO test VALUES ('{{max_word_freq}}')""")

    tok.update_sql_functions(test_config)

    test_content = temp_db_cursor.row_set('SELECT * FROM test')
    assert test_content == set((('1133', ), ))


def test_normalize_postcode(analyzer):
    with analyzer() as anl:
        assert anl.normalize_postcode('123') == '123'
        assert anl.normalize_postcode('ab-34 ') == 'AB-34'
        assert anl.normalize_postcode('38 Б') == '38 Б'


def test_update_postcodes_from_db_empty(analyzer, table_factory, word_table):
    table_factory('location_postcode', 'postcode TEXT',
                  content=(('1234',), ('12 34',), ('AB23',), ('1234',)))

    with analyzer() as anl:
        anl.update_postcodes_from_db()

    assert word_table.count() == 3
    assert word_table.get_postcodes() == {'1234', '12 34', 'AB23'}


def test_update_postcodes_from_db_add_and_remove(analyzer, table_factory, word_table):
    table_factory('location_postcode', 'postcode TEXT',
                  content=(('1234',), ('45BC', ), ('XX45', )))
    word_table.add_postcode(' 1234', '1234')
    word_table.add_postcode(' 5678', '5678')

    with analyzer() as anl:
        anl.update_postcodes_from_db()

    assert word_table.count() == 3
    assert word_table.get_postcodes() == {'1234', '45BC', 'XX45'}


def test_update_special_phrase_empty_table(analyzer, word_table):
    with analyzer() as anl:
        anl.update_special_phrases([
            ("König bei", "amenity", "royal", "near"),
            ("Könige ", "amenity", "royal", "-"),
            ("street", "highway", "primary", "in")
        ], True)

    assert word_table.get_special() \
           == {(' KÖNIG BEI', 'König bei', 'amenity', 'royal', 'near'),
               (' KÖNIGE', 'Könige', 'amenity', 'royal', None),
               (' STREET', 'street', 'highway', 'primary', 'in')}


def test_update_special_phrase_delete_all(analyzer, word_table):
    word_table.add_special(' FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special(' BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([], True)

    assert word_table.count_special() == 0


def test_update_special_phrases_no_replace(analyzer, word_table):
    word_table.add_special(' FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special(' BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([], False)

    assert word_table.count_special() == 2


def test_update_special_phrase_modify(analyzer, word_table):
    word_table.add_special(' FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special(' BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([
            ('prison', 'amenity', 'prison', 'in'),
            ('bar', 'highway', 'road', '-'),
            ('garden', 'leisure', 'garden', 'near')
        ], True)

    assert word_table.get_special() \
           == {(' PRISON', 'prison', 'amenity', 'prison', 'in'),
               (' BAR', 'bar', 'highway', 'road', None),
               (' GARDEN', 'garden', 'leisure', 'garden', 'near')}


def test_add_country_names_new(analyzer, word_table):
    with analyzer() as anl:
        anl.add_country_names('es', {'name': 'Espagña', 'name:en': 'Spain'})

    assert word_table.get_country() == {('es', ' ESPAGÑA'), ('es', ' SPAIN')}


def test_add_country_names_extend(analyzer, word_table):
    word_table.add_country('ch', ' SCHWEIZ')

    with analyzer() as anl:
        anl.add_country_names('ch', {'name': 'Schweiz', 'name:fr': 'Suisse'})

    assert word_table.get_country() == {('ch', ' SCHWEIZ'), ('ch', ' SUISSE')}
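

# Tests for how process_place() handles the name part of a place.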
class TestPlaceNames:

    @pytest.fixture(autouse=True)
    def setup(self, analyzer, getorcreate_full_word):
        with analyzer() as anl:
            self.analyzer = anl
            yield anl


    def expect_name_terms(self, info, *expected_terms):
        tokens = self.analyzer.get_word_token_info(expected_terms)
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        assert eval(info['names']) == set((t[2] for t in tokens))


    def test_simple_names(self):
        info = self.analyzer.process_place({'name': {'name': 'Soft bAr', 'ref': '34'}})

        self.expect_name_terms(info, '#Soft bAr', '#34', 'Soft', 'bAr', '34')


    @pytest.mark.parametrize('sep', [',', ';'])
    def test_names_with_separator(self, sep):
        info = self.analyzer.process_place({'name': {'name': sep.join(('New York', 'Big Apple'))}})

        self.expect_name_terms(info, '#New York', '#Big Apple',
                               'new', 'york', 'big', 'apple')


    def test_full_names_with_bracket(self):
        info = self.analyzer.process_place({'name': {'name': 'Houseboat (left)'}})

        self.expect_name_terms(info, '#Houseboat (left)', '#Houseboat',
                               'houseboat', 'left')


    def test_country_name(self, word_table):
        info = self.analyzer.process_place({'name': {'name': 'Norge'},
                                            'country_feature': 'no'})

        self.expect_name_terms(info, '#norge', 'norge')
        assert word_table.get_country() == {('no', ' NORGE')}
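

# Tests for how process_place() handles the address part of a place. The
# extra transliteration rule maps '🜵' to a space, providing an input that
# normalises to an empty term.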
class TestPlaceAddress:

    @pytest.fixture(autouse=True)
    def setup(self, analyzer, getorcreate_full_word):
        with analyzer(trans=(":: upper()", "'🜵' > ' '")) as anl:
            self.analyzer = anl
            yield anl


    def process_address(self, **kwargs):
        return self.analyzer.process_place({'address': kwargs})


    def name_token_set(self, *expected_terms):
        tokens = self.analyzer.get_word_token_info(expected_terms)
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        return set((t[2] for t in tokens))


    @pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345'])
    def test_process_place_postcode(self, word_table, pcode):
        self.process_address(postcode=pcode)

        assert word_table.get_postcodes() == {pcode, }


    @pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836'])
    def test_process_place_bad_postcode(self, word_table, pcode):
        self.process_address(postcode=pcode)

        assert not word_table.get_postcodes()


    @pytest.mark.parametrize('hnr', ['123a', '1', '101'])
    def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id):
        info = self.process_address(housenumber=hnr)

        assert info['hnr'] == hnr.upper()
        assert info['hnr_tokens'] == "{-1}"


    def test_process_place_housenumbers_lists(self, getorcreate_hnr_id):
        info = self.process_address(conscriptionnumber='1; 2;3')

        assert set(info['hnr'].split(';')) == set(('1', '2', '3'))
        assert info['hnr_tokens'] == "{-1,-2,-3}"


    def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id):
        info = self.process_address(housenumber='134',
                                    conscriptionnumber='134',
                                    streetnumber='99a')

        assert set(info['hnr'].split(';')) == set(('134', '99A'))
        assert info['hnr_tokens'] == "{-1,-2}"


    def test_process_place_housenumbers_cached(self, getorcreate_hnr_id):
        info = self.process_address(housenumber="45")
        assert info['hnr_tokens'] == "{-1}"

        info = self.process_address(housenumber="46")
        assert info['hnr_tokens'] == "{-2}"

        info = self.process_address(housenumber="41;45")
        assert eval(info['hnr_tokens']) == {-1, -3}

        info = self.process_address(housenumber="41")
        assert eval(info['hnr_tokens']) == {-3}


    def test_process_place_street(self):
        info = self.process_address(street='Grand Road')

        assert eval(info['street']) == self.name_token_set('#GRAND ROAD')


    def test_process_place_street_empty(self):
        info = self.process_address(street='🜵')

        assert 'street' not in info


    def test_process_place_place(self):
        info = self.process_address(place='Honu Lulu')

        assert eval(info['place_search']) == self.name_token_set('#HONU LULU',
                                                                 'HONU', 'LULU')
        assert eval(info['place_match']) == self.name_token_set('#HONU LULU')


    def test_process_place_place_empty(self):
        info = self.process_address(place='🜵')

        assert 'place_search' not in info
        assert 'place_match' not in info


    def test_process_place_address_terms(self):
        info = self.process_address(country='de', city='Zwickau', state='Sachsen',
                                    suburb='Zwickau', street='Hauptstr',
                                    full='right behind the church')

        city_full = self.name_token_set('#ZWICKAU')
        city_all = self.name_token_set('#ZWICKAU', 'ZWICKAU')
        state_full = self.name_token_set('#SACHSEN')
        state_all = self.name_token_set('#SACHSEN', 'SACHSEN')

        result = {k: [eval(v[0]), eval(v[1])] for k, v in info['addr'].items()}

        assert result == {'city': [city_all, city_full],
                          'suburb': [city_all, city_full],
                          'state': [state_all, state_full]}


    def test_process_place_address_terms_empty(self):
        info = self.process_address(country='de', city=' ', street='Hauptstr',
                                    full='right behind the church')

        assert 'addr' not in info