Merge pull request #3397 from lonvia/improve-handling-unlisted-places

diff --git a/test/python/tokenizer/test_icu.py b/test/python/tokenizer/test_icu.py
index a3839365a750baa9c39ad8555acea1416fee9c79..2a4865db2acb95e4e177c5732441ef1f7dc610a0 100644
--- a/test/python/tokenizer/test_icu.py
+++ b/test/python/tokenizer/test_icu.py
@@ -7,8 +7,8 @@
 """
 Tests for ICU tokenizer.
 """
-import shutil
 import yaml
+import itertools
 
 import pytest
 
@@ -16,7 +16,7 @@ from nominatim.tokenizer import icu_tokenizer
 import nominatim.tokenizer.icu_rule_loader
 from nominatim.db import properties
 from nominatim.db.sql_preprocessor import SQLPreprocessor
-from nominatim.indexer.place_info import PlaceInfo
+from nominatim.data.place_info import PlaceInfo
 
 from mock_icu_word_table import MockIcuWordTable
 
@@ -31,8 +31,6 @@ def test_config(project_env, tmp_path):
     sqldir.mkdir()
     (sqldir / 'tokenizer').mkdir()
     (sqldir / 'tokenizer' / 'icu_tokenizer.sql').write_text("SELECT 'a'")
-    shutil.copy(str(project_env.lib_dir.sql / 'tokenizer' / 'icu_tokenizer_tables.sql'),
-                str(sqldir / 'tokenizer' / 'icu_tokenizer_tables.sql'))
 
     project_env.lib_dir.sql = sqldir
 
@@ -71,12 +69,19 @@ def analyzer(tokenizer_factory, test_config, monkeypatch,
 
     def _mk_analyser(norm=("[[:Punctuation:][:Space:]]+ > ' '",), trans=(':: upper()',),
                      variants=('~gasse -> gasse', 'street => st', ),
-                     sanitizers=[]):
+                     sanitizers=[], with_housenumber=False,
+                     with_postcode=False):
         cfgstr = {'normalization': list(norm),
                   'sanitizers': sanitizers,
                   'transliteration': list(trans),
                   'token-analysis': [{'analyzer': 'generic',
                                       'variants': [{'words': list(variants)}]}]}
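+        # Register the dedicated analyzers under their reserved ids only on
+        # demand, so individual tests can opt into housenumber/postcode analysis.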
+        if with_housenumber:
+            cfgstr['token-analysis'].append({'id': '@housenumber',
+                                             'analyzer': 'housenumbers'})
+        if with_postcode:
+            cfgstr['token-analysis'].append({'id': '@postcode',
+                                             'analyzer': 'postcodes'})
         (test_config.project_dir / 'icu_tokenizer.yaml').write_text(yaml.dump(cfgstr))
         tok.loader = nominatim.tokenizer.icu_rule_loader.ICURuleLoader(test_config)
 
@@ -196,16 +201,14 @@ def test_update_sql_functions(db_prop, temp_db_cursor,
 
 def test_finalize_import(tokenizer_factory, temp_db_conn,
                          temp_db_cursor, test_config, sql_preprocessor_cfg):
-    func_file = test_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_indices.sql'
-    func_file.write_text("""CREATE FUNCTION test() RETURNS TEXT
-                            AS $$ SELECT 'b'::text $$ LANGUAGE SQL""")
-
     tok = tokenizer_factory()
     tok.init_new_db(test_config)
 
+    assert not temp_db_conn.index_exists('idx_word_word_id')
+
     tok.finalize_import(test_config)
 
-    temp_db_cursor.scalar('SELECT test()') == 'b'
+    assert temp_db_conn.index_exists('idx_word_word_id')
 
 
 def test_check_database(test_config, tokenizer_factory,
@@ -216,23 +219,28 @@ def test_check_database(test_config, tokenizer_factory,
     assert tok.check_database(test_config) is None
 
 
-def test_update_statistics_reverse_only(word_table, tokenizer_factory):
+def test_update_statistics_reverse_only(word_table, tokenizer_factory, test_config):
     tok = tokenizer_factory()
-    tok.update_statistics()
+    tok.update_statistics(test_config)
 
 
-def test_update_statistics(word_table, table_factory, temp_db_cursor, tokenizer_factory):
+def test_update_statistics(word_table, table_factory, temp_db_cursor,
+                           tokenizer_factory, test_config):
     word_table.add_full_word(1000, 'hello')
+    word_table.add_full_word(1001, 'bye')
     table_factory('search_name',
-                  'place_id BIGINT, name_vector INT[]',
-                  [(12, [1000])])
+                  'place_id BIGINT, name_vector INT[], nameaddress_vector INT[]',
+                  [(12, [1000], [1001])])
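+    # 'hello' (1000) appears in a name_vector, 'bye' (1001) only in a
+    # nameaddress_vector, so statistics should fill 'count' for the former
+    # and 'addr_count' for the latter.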
     tok = tokenizer_factory()
 
-    tok.update_statistics()
+    tok.update_statistics(test_config)
 
     assert temp_db_cursor.scalar("""SELECT count(*) FROM word
-                                    WHERE type = 'W' and
-                                          (info->>'count')::int > 0""") > 0
+                                    WHERE type = 'W' and word_id = 1000 and
+                                          (info->>'count')::int > 0""") == 1
+    assert temp_db_cursor.scalar("""SELECT count(*) FROM word
+                                    WHERE type = 'W' and word_id = 1001 and
+                                          (info->>'addr_count')::int > 0""") == 1
 
 
 def test_normalize_postcode(analyzer):
@@ -242,28 +250,69 @@ def test_normalize_postcode(analyzer):
         assert anl.normalize_postcode('38 Б') == '38 Б'
 
 
-def test_update_postcodes_from_db_empty(analyzer, table_factory, word_table):
-    table_factory('location_postcode', 'postcode TEXT',
-                  content=(('1234',), ('12 34',), ('AB23',), ('1234',)))
+class TestPostcodes:
 
-    with analyzer() as anl:
-        anl.update_postcodes_from_db()
+    @pytest.fixture(autouse=True)
+    def setup(self, analyzer, sql_functions):
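+        # Run every test in this class with the clean-postcodes sanitizer and
+        # the dedicated postcode token analyzer enabled.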
+        sanitizers = [{'step': 'clean-postcodes'}]
+        with analyzer(sanitizers=sanitizers, with_postcode=True) as anl:
+            self.analyzer = anl
+            yield anl
 
-    assert word_table.count() == 3
-    assert word_table.get_postcodes() == {'1234', '12 34', 'AB23'}
 
+    def process_postcode(self, cc, postcode):
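+        # Helper: run a place carrying only a country code and a postcode
+        # through the analyzer.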
+        return self.analyzer.process_place(PlaceInfo({'country_code': cc,
+                                                      'address': {'postcode': postcode}}))
 
-def test_update_postcodes_from_db_add_and_remove(analyzer, table_factory, word_table):
-    table_factory('location_postcode', 'postcode TEXT',
-                  content=(('1234',), ('45BC', ), ('XX45', )))
-    word_table.add_postcode(' 1234', '1234')
-    word_table.add_postcode(' 5678', '5678')
 
-    with analyzer() as anl:
-        anl.update_postcodes_from_db()
+    def test_update_postcodes_from_db_empty(self, table_factory, word_table):
+        table_factory('location_postcode', 'country_code TEXT, postcode TEXT',
+                      content=(('de', '12345'), ('se', '132 34'),
+                               ('bm', 'AB23'), ('fr', '12345')))
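+        # Reformatted postcodes keep their lookup variant behind '@'.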
+
+        self.analyzer.update_postcodes_from_db()
+
+        assert word_table.count() == 5
+        assert word_table.get_postcodes() == {'12345', '132 34@132 34', 'AB 23@AB 23'}
+
+
+    def test_update_postcodes_from_db_ambiguous(self, table_factory, word_table):
+        table_factory('location_postcode', 'country_code TEXT, postcode TEXT',
+                      content=(('in', '123456'), ('sg', '123456')))
+
+        self.analyzer.update_postcodes_from_db()
+
+        assert word_table.count() == 3
+        assert word_table.get_postcodes() == {'123456', '123456@123 456'}
+
+
+    def test_update_postcodes_from_db_add_and_remove(self, table_factory, word_table):
+        table_factory('location_postcode', 'country_code TEXT, postcode TEXT',
+                      content=(('ch', '1234'), ('bm', 'BC 45'), ('bm', 'XX45')))
+        word_table.add_postcode(' 1234', '1234')
+        word_table.add_postcode(' 5678', '5678')
+
+        self.analyzer.update_postcodes_from_db()
+
+        assert word_table.count() == 5
+        assert word_table.get_postcodes() == {'1234', 'BC 45@BC 45', 'XX 45@XX 45'}
+
+
+    def test_process_place_postcode_simple(self, word_table):
+        info = self.process_postcode('de', '12345')
+
+        assert info['postcode'] == '12345'
+
+        assert word_table.get_postcodes() == {'12345', }
+
+
+    def test_process_place_postcode_with_space(self, word_table):
+        info = self.process_postcode('in', '123 567')
+
+        assert info['postcode'] == '123567'
+
+        assert word_table.get_postcodes() == {'123567@123 567', }
 
-    assert word_table.count() == 3
-    assert word_table.get_postcodes() == {'1234', '45BC', 'XX45'}
 
 
 def test_update_special_phrase_empty_table(analyzer, word_table):
@@ -433,13 +482,6 @@ class TestPlaceAddress:
         assert word_table.get_postcodes() == {pcode, }
 
 
-    @pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836'])
-    def test_process_place_bad_postcode(self, word_table, pcode):
-        self.process_address(postcode=pcode)
-
-        assert not word_table.get_postcodes()
-
-
     @pytest.mark.parametrize('hnr', ['123a', '1', '101'])
     def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id):
         info = self.process_address(housenumber=hnr)
@@ -481,7 +523,7 @@ class TestPlaceAddress:
     def test_process_place_nonexisting_street(self):
         info = self.process_address(street='Grand Road')
 
-        assert 'street' not in info
+        assert info['street'] == '{}'
 
 
     def test_process_place_multiple_street_tags(self):
@@ -496,7 +538,7 @@ class TestPlaceAddress:
     def test_process_place_street_empty(self):
         info = self.process_address(street='🜵')
 
-        assert 'street' not in info
+        assert info['street'] == '{}'
 
 
     def test_process_place_street_from_cache(self):
@@ -512,7 +554,7 @@ class TestPlaceAddress:
     def test_process_place_place(self):
         info = self.process_address(place='Honu Lulu')
 
-        assert eval(info['place']) == self.name_token_set('HONU', 'LULU')
+        assert eval(info['place']) == self.name_token_set('HONU', 'LULU', '#HONU LULU')
 
 
     def test_process_place_place_extra(self):
@@ -532,8 +574,8 @@ class TestPlaceAddress:
                                     suburb='Zwickau', street='Hauptstr',
                                     full='right behind the church')
 
-        city = self.name_token_set('ZWICKAU')
-        state = self.name_token_set('SACHSEN')
+        city = self.name_token_set('ZWICKAU', '#ZWICKAU')
+        state = self.name_token_set('SACHSEN', '#SACHSEN')
 
         result = {k: eval(v) for k,v in info['addr'].items()}
 
@@ -545,7 +587,7 @@ class TestPlaceAddress:
 
         result = {k: eval(v) for k,v in info['addr'].items()}
 
-        assert result == {'city': self.name_token_set('Bruxelles')}
+        assert result == {'city': self.name_token_set('Bruxelles', '#Bruxelles')}
 
 
     def test_process_place_address_terms_empty(self):
@@ -554,3 +596,142 @@ class TestPlaceAddress:
 
         assert 'addr' not in info
 
+
+class TestPlaceHousenumberWithAnalyser:
+
+    @pytest.fixture(autouse=True)
+    def setup(self, analyzer, sql_functions):
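+        # The sanitizer treats conscription and street numbers as housenumbers, too.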
+        hnr = {'step': 'clean-housenumbers',
+               'filter-kind': ['housenumber', 'conscriptionnumber', 'streetnumber']}
+        with analyzer(trans=(":: upper()", "'🜵' > ' '"),
+                      sanitizers=[hnr], with_housenumber=True) as anl:
+            self.analyzer = anl
+            yield anl
+
+
+    @pytest.fixture
+    def getorcreate_hnr_id(self, temp_db_cursor):
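+        # Stub the analyzed housenumber lookup with a mock that hands out
+        # negative ids from seq_word, making the expected token sets
+        # ({-1}, {-1,-2}, ...) deterministic.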
+        temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION create_analyzed_hnr_id(norm_term TEXT, lookup_terms TEXT[])
+                                  RETURNS INTEGER AS $$
+                                    SELECT -nextval('seq_word')::INTEGER; $$ LANGUAGE SQL""")
+
+
+    def process_address(self, **kwargs):
+        return self.analyzer.process_place(PlaceInfo({'address': kwargs}))
+
+
+    def name_token_set(self, *expected_terms):
+        tokens = self.analyzer.get_word_token_info(expected_terms)
+        for token in tokens:
+            assert token[2] is not None, "No token for {0}".format(token)
+
+        return set((t[2] for t in tokens))
+
+
+    @pytest.mark.parametrize('hnr', ['123 a', '1', '101'])
+    def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id):
+        info = self.process_address(housenumber=hnr)
+
+        assert info['hnr'] == hnr.upper()
+        assert info['hnr_tokens'] == "{-1}"
+
+
+    def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id):
+        info = self.process_address(housenumber='134',
+                                    conscriptionnumber='134',
+                                    streetnumber='99a')
+
+        assert set(info['hnr'].split(';')) == set(('134', '99 A'))
+        assert info['hnr_tokens'] == "{-1,-2}"
+
+
+    def test_process_place_housenumbers_cached(self, getorcreate_hnr_id):
+        info = self.process_address(housenumber="45")
+        assert info['hnr_tokens'] == "{-1}"
+
+        info = self.process_address(housenumber="46")
+        assert info['hnr_tokens'] == "{-2}"
+
+        info = self.process_address(housenumber="41;45")
+        assert eval(info['hnr_tokens']) == {-1, -3}
+
+        info = self.process_address(housenumber="41")
+        assert eval(info['hnr_tokens']) == {-3}
+
+
+class TestUpdateWordTokens:
+
+    @pytest.fixture(autouse=True)
+    def setup(self, tokenizer_factory, table_factory, placex_table, word_table):
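+        # update_word_tokens() should drop word entries that are no longer
+        # referenced from search_name or placex; these tests exercise the
+        # housenumber part.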
+        table_factory('search_name', 'place_id BIGINT, name_vector INT[]')
+        self.tok = tokenizer_factory()
+
+
+    @pytest.fixture
+    def search_entry(self, temp_db_cursor):
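+        # Insert a search_name row with a fresh place_id; the positional
+        # arguments form the name_vector.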
+        place_id = itertools.count(1000)
+
+        def _insert(*args):
+            temp_db_cursor.execute("INSERT INTO search_name VALUES (%s, %s)",
+                                   (next(place_id), list(args)))
+
+        return _insert
+
+
+    @pytest.fixture(params=['simple', 'analyzed'])
+    def add_housenumber(self, request, word_table):
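+        # The word table stores simple housenumbers as a plain token and
+        # analyzed ones as a list of lookup terms; run each test against
+        # both layouts.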
+        if request.param == 'simple':
+            def _make(hid, hnr):
+                word_table.add_housenumber(hid, hnr)
+        elif request.param == 'analyzed':
+            def _make(hid, hnr):
+                word_table.add_housenumber(hid, [hnr])
+
+        return _make
+
+
+    @pytest.mark.parametrize('hnr', ('1a', '1234567', '34 5'))
+    def test_remove_unused_housenumbers(self, add_housenumber, word_table, hnr):
+        add_housenumber(1000, hnr)
+
+        assert word_table.count_housenumbers() == 1
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 0
+
+
+    def test_keep_unused_numeral_housenumbers(self, add_housenumber, word_table):
+        add_housenumber(1000, '5432')
+
+        assert word_table.count_housenumbers() == 1
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1
+
+
+    def test_keep_housenumbers_from_search_name_table(self, add_housenumber, word_table, search_entry):
+        add_housenumber(9999, '5432a')
+        add_housenumber(9991, '9 a')
+        search_entry(123, 9999, 34)
+
+        assert word_table.count_housenumbers() == 2
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1
+
+
+    def test_keep_housenumbers_from_placex_table(self, add_housenumber, word_table, placex_table):
+        add_housenumber(9999, '5432a')
+        add_housenumber(9990, '34z')
+        placex_table.add(housenumber='34z')
+        placex_table.add(housenumber='25432a')
+
+        assert word_table.count_housenumbers() == 2
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1
+
+
+    def test_keep_housenumbers_from_placex_table_hnr_list(self, add_housenumber, word_table, placex_table):
+        add_housenumber(9991, '9 b')
+        add_housenumber(9990, '34z')
+        placex_table.add(housenumber='9 a;9 b;9 c')
+
+        assert word_table.count_housenumbers() == 2
+        self.tok.update_word_tokens()
+        assert word_table.count_housenumbers() == 1