# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2025 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for name normalisation and variant generation during import.
"""
import pytest

from icu import Transliterator

import nominatim_db.tokenizer.token_analysis.generic as module
from nominatim_db.errors import UsageError

DEFAULT_NORMALIZATION = """ :: NFD ();
                            'ðŸœ³' &gt; ' ';
                            [[:Nonspacing Mark:] [:Cf:]] &gt;;
                            :: lower ();
                            [[:Punctuation:][:Space:]]+ &gt; ' ';
                            :: NFC ();
                        """

DEFAULT_TRANSLITERATION = """ ::  Latin ();
                              'ðŸœµ' &gt; ' ';
                          """


def make_analyser(*variants, variant_only=False):
    rules = {'analyzer': 'generic', 'variants': [{'words': variants}]}
    if variant_only:
        rules['mode'] = 'variant-only'
    trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
    norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
    config = module.configure(rules, norm, trans)

    return module.create(norm, trans, config)


def get_normalized_variants(proc, name):
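    # Normalize the input first, as the analyser expects pre-normalized
    # names; the first element of the compute_variants() result holds the
    # variant strings that these tests check.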
    norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
    return proc.compute_variants(norm.transliterate(name).strip())[0]


def test_no_variants():
    rules = {'analyzer': 'generic'}
    trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
    norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
    config = module.configure(rules, norm, trans)

    proc = module.create(norm, trans, config)

    assert get_normalized_variants(proc, '大德!') == ['dà dé']


def test_variants_empty():
    proc = make_analyser('saint -> 🜵', 'street -> st')

    assert get_normalized_variants(proc, '🜵') == []
    assert get_normalized_variants(proc, '🜳') == []
    assert get_normalized_variants(proc, 'saint') == ['saint']


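# Each entry is (variant rules, input name, expected set of variants).
# As exercised below: '->' adds the replacement as an additional variant,
# '=>' substitutes it for the matched term, a leading '~' also matches the
# term when glued to the preceding word, '^'/'$' anchor a term to the
# beginning/end of the name, and source terms and replacements may be
# given as comma-separated lists.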
VARIANT_TESTS = [
    (('~strasse,~straße -> str', '~weg => weg'), "hallo", {'hallo'}),
    (('weg => wg',), "holzweg", {'holzweg'}),
    (('weg -> wg',), "holzweg", {'holzweg'}),
    (('~weg => weg',), "holzweg", {'holz weg', 'holzweg'}),
    (('~weg -> weg',), "holzweg", {'holz weg', 'holzweg'}),
    (('~weg => w',), "holzweg", {'holz w', 'holzw'}),
    (('~weg -> w',), "holzweg", {'holz weg', 'holzweg', 'holz w', 'holzw'}),
    (('~weg => weg',), "Meier Weg", {'meier weg', 'meierweg'}),
    (('~weg -> weg',), "Meier Weg", {'meier weg', 'meierweg'}),
    (('~weg => w',), "Meier Weg", {'meier w', 'meierw'}),
    (('~weg -> w',), "Meier Weg", {'meier weg', 'meierweg', 'meier w', 'meierw'}),
    (('weg => wg',), "Meier Weg", {'meier wg'}),
    (('weg -> wg',), "Meier Weg", {'meier weg', 'meier wg'}),
    (('~strasse,~straße -> str', '~weg => weg'), "Bauwegstraße",
     {'bauweg straße', 'bauweg str', 'bauwegstraße', 'bauwegstr'}),
    (('am => a', 'bach => b'), "am bach", {'a b'}),
    (('am => a', '~bach => b'), "am bach", {'a b'}),
    (('am -> a', '~bach -> b'), "am bach", {'am bach', 'a bach', 'am b', 'a b'}),
    (('am -> a', '~bach -> b'), "ambach", {'ambach', 'am bach', 'amb', 'am b'}),
    (('saint -> s,st', 'street -> st'), "Saint Johns Street",
     {'saint johns street', 's johns street', 'st johns street',
      'saint johns st', 's johns st', 'st johns st'}),
    (('river$ -> r',), "River Bend Road", {'river bend road'}),
    (('river$ -> r',), "Bent River", {'bent river', 'bent r'}),
    (('^north => n',), "North 2nd Street", {'n 2nd street'}),
    (('^north => n',), "Airport North", {'airport north'}),
    (('am -> a',), "am am am am am am am am", {'am am am am am am am am'}),
    (('am => a',), "am am am am am am am am", {'a a a a a a a a'})
    ]


@pytest.mark.parametrize("rules,name,variants", VARIANT_TESTS)
def test_variants(rules, name, variants):
    proc = make_analyser(*rules)

    result = get_normalized_variants(proc, name)

    assert len(result) == len(set(result))
    assert set(result) == variants


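# In variant-only mode, the unmodified original name must not appear in
# the output: names without a matching rule yield no variants at all.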
VARIANT_ONLY_TESTS = [
    (('weg => wg',), "hallo", set()),
    (('weg => wg',), "Meier Weg", {'meier wg'}),
    (('weg -> wg',), "Meier Weg", {'meier wg'}),
    ]


@pytest.mark.parametrize("rules,name,variants", VARIANT_ONLY_TESTS)
def test_variants_only(rules, name, variants):
    proc = make_analyser(*rules, variant_only=True)

    result = get_normalized_variants(proc, name)

    assert len(result) == len(set(result))
    assert set(result) == variants


class TestGetReplacements:
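    """ Tests for the replacement table computed by module.configure().

        In the table, spaces act as word-boundary markers: keys and
        variants with surrounding spaces apply only at full word
        boundaries, a missing leading or trailing space lets the term
        attach directly to the neighbouring word, and '^' stands for
        the beginning or end of the whole name.
    """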

    @staticmethod
    def configure_rules(*variants):
        rules = {'analyzer': 'generic', 'variants': [{'words': variants}]}
        trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
        norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
        return module.configure(rules, norm, trans)

    def get_replacements(self, *variants):
        config = self.configure_rules(*variants)

        return sorted((k, sorted(v)) for k, v in config['replacements'])

    @pytest.mark.parametrize("variant", ['foo &gt; bar', 'foo -&gt; bar -&gt; bar',
                                         '~foo~ -&gt; bar', 'fo~ o -&gt; bar'])
    def test_invalid_variant_description(self, variant):
        with pytest.raises(UsageError):
            self.configure_rules(variant)

    @pytest.mark.parametrize("rule", ["!!! -&gt; bar", "bar =&gt; !!!"])
    def test_ignore_unnormalizable_terms(self, rule):
        repl = self.get_replacements(rule)

        assert repl == []

    def test_add_full(self):
        repl = self.get_replacements("foo -&gt; bar")

        assert repl == [(' foo ', [' bar', ' foo'])]

    def test_replace_full(self):
        repl = self.get_replacements("foo =&gt; bar")

        assert repl == [(' foo ', [' bar'])]

    def test_add_suffix_no_decompose(self):
        repl = self.get_replacements("~berg |-&gt; bg")

        assert repl == [(' berg ', [' berg', ' bg']),
                        ('berg ', ['berg', 'bg'])]

    def test_replace_suffix_no_decompose(self):
        repl = self.get_replacements("~berg |=&gt; bg")

        assert repl == [(' berg ', [' bg']), ('berg ', ['bg'])]

    def test_add_suffix_decompose(self):
        repl = self.get_replacements("~berg -&gt; bg")

        assert repl == [(' berg ', [' berg', ' bg', 'berg', 'bg']),
                        ('berg ', [' berg', ' bg', 'berg', 'bg'])]

    def test_replace_suffix_decompose(self):
        repl = self.get_replacements("~berg =&gt; bg")

        assert repl == [(' berg ', [' bg', 'bg']),
                        ('berg ', [' bg', 'bg'])]

    def test_add_prefix_no_compose(self):
        repl = self.get_replacements("hinter~ |-&gt; hnt")

        assert repl == [(' hinter', [' hinter', ' hnt']),
                        (' hinter ', [' hinter', ' hnt'])]

    def test_replace_prefix_no_compose(self):
        repl = self.get_replacements("hinter~ |=&gt; hnt")

        assert repl == [(' hinter', [' hnt']), (' hinter ', [' hnt'])]

    def test_add_prefix_compose(self):
        repl = self.get_replacements("hinter~-&gt; h")

        assert repl == [(' hinter', [' h', ' h ', ' hinter', ' hinter ']),
                        (' hinter ', [' h', ' h', ' hinter', ' hinter'])]

    def test_replace_prefix_compose(self):
        repl = self.get_replacements("hinter~=&gt; h")

        assert repl == [(' hinter', [' h', ' h ']),
                        (' hinter ', [' h', ' h'])]

    def test_add_beginning_only(self):
        repl = self.get_replacements("^Premier -&gt; Pr")

        assert repl == [('^ premier ', ['^ pr', '^ premier'])]

    def test_replace_beginning_only(self):
        repl = self.get_replacements("^Premier =&gt; Pr")

        assert repl == [('^ premier ', ['^ pr'])]

    def test_add_final_only(self):
        repl = self.get_replacements("road$ -&gt; rd")

        assert repl == [(' road ^', [' rd ^', ' road ^'])]

    def test_replace_final_only(self):
        repl = self.get_replacements("road$ =&gt; rd")

        assert repl == [(' road ^', [' rd ^'])]

    def test_decompose_only(self):
        repl = self.get_replacements("~foo -&gt; foo")

        assert repl == [(' foo ', [' foo', 'foo']),
                        ('foo ', [' foo', 'foo'])]

    def test_add_suffix_decompose_end_only(self):
        repl = self.get_replacements("~berg |-&gt; bg", "~berg$ -&gt; bg")

        assert repl == [(' berg ', [' berg', ' bg']),
                        (' berg ^', [' berg ^', ' bg ^', 'berg ^', 'bg ^']),
                        ('berg ', ['berg', 'bg']),
                        ('berg ^', [' berg ^', ' bg ^', 'berg ^', 'bg ^'])]

    def test_replace_suffix_decompose_end_only(self):
        repl = self.get_replacements("~berg |=&gt; bg", "~berg$ =&gt; bg")

        assert repl == [(' berg ', [' bg']),
                        (' berg ^', [' bg ^', 'bg ^']),
                        ('berg ', ['bg']),
                        ('berg ^', [' bg ^', 'bg ^'])]

    @pytest.mark.parametrize('rule', ["~berg,~burg -&gt; bg",
                                      "~berg, ~burg -&gt; bg",
                                      "~berg,,~burg -&gt; bg"])
    def test_add_multiple_suffix(self, rule):
        repl = self.get_replacements(rule)

        assert repl == [(' berg ', [' berg', ' bg', 'berg', 'bg']),
                        (' burg ', [' bg', ' burg', 'bg', 'burg']),
                        ('berg ', [' berg', ' bg', 'berg', 'bg']),
                        ('burg ', [' bg', ' burg', 'bg', 'burg'])]
</pre></body></html>