git.openstreetmap.org Git - nominatim.git/commitdiff
more formatting fixes
author Sarah Hoffmann <lonvia@denofr.de>
Mon, 12 Jul 2021 15:45:42 +0000 (17:45 +0200)
committer Sarah Hoffmann <lonvia@denofr.de>
Mon, 12 Jul 2021 15:45:42 +0000 (17:45 +0200)
Found by flake8.

21 files changed:
nominatim/cli.py
nominatim/clicmd/api.py
nominatim/clicmd/args.py
nominatim/clicmd/refresh.py
nominatim/clicmd/replication.py
nominatim/db/async_connection.py
nominatim/db/sql_preprocessor.py
nominatim/db/utils.py
nominatim/indexer/indexer.py
nominatim/indexer/progress.py
nominatim/indexer/runners.py
nominatim/tokenizer/legacy_icu_tokenizer.py
nominatim/tokenizer/legacy_tokenizer.py
nominatim/tools/check_database.py
nominatim/tools/database_import.py
nominatim/tools/exec_utils.py
nominatim/tools/migration.py
nominatim/tools/postcodes.py
nominatim/tools/special_phrases/sp_importer.py
nominatim/tools/special_phrases/sp_wiki_loader.py
nominatim/tools/special_phrases/special_phrase.py

nominatim/cli.py
index 533a920e07e5937d689be322e056167ca3135884..5626deb4b5aa6d503e9efd345086f85617ffd487 100644
@@ -103,7 +103,7 @@ class CommandlineParser:
         return 1
 
 
-##### Subcommand classes
+# Subcommand classes
 #
 # Each class needs to implement two functions: add_args() adds the CLI parameters
 # for the subfunction, run() executes the subcommand.
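As the comment above says, every subcommand implements add_args() and run(). A minimal sketch of that contract (the class name and the flag are made up for illustration; they are not part of this commit):

    class ExampleSubcommand:
        """ Print a greeting. Demonstrates the add_args()/run() contract.
        """

        @staticmethod
        def add_args(parser):
            parser.add_argument('--name', default='world',
                                help='Name to greet (made-up parameter).')

        @staticmethod
        def run(args):
            print('Hello, {}!'.format(args.name))
            return 0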
nominatim/clicmd/api.py
index a555695224403fd3b647f9601f5888f78b506bfb..b99d37b87430631fe95fcdca4d59325dabc94c71 100644
@@ -90,7 +90,7 @@ class APISearch:
         if args.query:
             params = dict(q=args.query)
         else:
-            params = {k : getattr(args, k) for k, _ in STRUCTURED_QUERY if getattr(args, k)}
+            params = {k: getattr(args, k) for k, _ in STRUCTURED_QUERY if getattr(args, k)}
 
         for param, _ in EXTRADATA_PARAMS:
             if getattr(args, param):
nominatim/clicmd/args.py
index ee1941875d56b8c1007cf6bea1d222672080fd22..996f48f26dbffeae3306d6b4afd13853657ea02b 100644
@@ -24,4 +24,4 @@ class NominatimArgs:
                                      main_data=self.config.TABLESPACE_PLACE_DATA,
                                      main_index=self.config.TABLESPACE_PLACE_INDEX
                                     )
-                    )
+                   )
nominatim/clicmd/refresh.py
index fbc23350c7212478f8c8b0c9fa484ec154f9f070..969998ad5063d3233882c2db8084fc70f4b13194 100644
@@ -61,7 +61,7 @@ class UpdateRefresh:
                                   args.threads or 1)
                 indexer.index_postcodes()
             else:
-                LOG.error("The place table doesn\'t exist. " \
+                LOG.error("The place table doesn't exist. "
                           "Postcode updates on a frozen database is not possible.")
 
         if args.word_counts:
nominatim/clicmd/replication.py
index 242b0f6a0b00c80bacc05ef01b97823e6bed2dfd..4c8cd44e2a77d351015600b3112bc4dca8d9f8ca 100644
@@ -93,7 +93,7 @@ class UpdateReplication:
                       indexed_only=not args.once)
 
         # Sanity check to not overwhelm the Geofabrik servers.
-        if 'download.geofabrik.de'in params['base_url']\
+        if 'download.geofabrik.de' in params['base_url']\
            and params['update_interval'] < 86400:
             LOG.fatal("Update interval too low for download.geofabrik.de.\n"
                       "Please check install documentation "
nominatim/db/async_connection.py
index f06f3159f7b392c30526e988ef86aed5ecd4035a..a86c5bdcee13eb37c4874f29be286d2b06beecc0 100644
@@ -85,7 +85,7 @@ class DBConnection:
 
         # Use a dict to hand in the parameters because async is a reserved
         # word in Python3.
-        self.conn = psycopg2.connect(**{'dsn' : self.dsn, 'async' : True})
+        self.conn = psycopg2.connect(**{'dsn': self.dsn, 'async': True})
         self.wait()
 
         self.cursor = self.conn.cursor(cursor_factory=cursor_factory)
nominatim/db/sql_preprocessor.py
index dafc5de434bb3bf69a69014d7d7e20a2059f9313..d756a215618d316499af1261c4f48a35c801b20c 100644
@@ -61,7 +61,7 @@ def _setup_postgresql_features(conn):
     """
     pg_version = conn.server_version_tuple()
     return {
-        'has_index_non_key_column' : pg_version >= (11, 0, 0)
+        'has_index_non_key_column': pg_version >= (11, 0, 0)
     }
 
 class SQLPreprocessor:
nominatim/db/utils.py
index 4d4305e7d67ff74c93119bbc67ef4acfa7036e2c..9a4a41a581661ced3048797b7ae1ff98d613dad0 100644
@@ -61,9 +61,9 @@ def execute_file(dsn, fname, ignore_errors=False, pre_code=None, post_code=None)
 
 
 # List of characters that need to be quoted for the copy command.
-_SQL_TRANSLATION = {ord(u'\\') : u'\\\\',
-                    ord(u'\t') : u'\\t',
-                    ord(u'\n') : u'\\n'}
+_SQL_TRANSLATION = {ord(u'\\'): u'\\\\',
+                    ord(u'\t'): u'\\t',
+                    ord(u'\n'): u'\\n'}
 
 class CopyBuffer:
     """ Data collector for the copy_from command.
nominatim/indexer/indexer.py
index 76883500b7a23469b1332d0ca0305551349b4dc6..d0cfb391c4dbdf7a63c875af6ec1b2d98ca88d0c 100644
@@ -203,7 +203,7 @@ class Indexer:
 
                                 # And insert the curent batch
                                 for idx in range(0, len(places), batch):
-                                    part = places[idx:idx+batch]
+                                    part = places[idx:idx + batch]
                                     LOG.debug("Processing places: %s", str(part))
                                     runner.index_places(pool.next_free_worker(), part)
                                     progress.add(len(part))
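The slice in this hunk walks the list in steps of batch; a toy run with made-up data shows the batching:

    places = list(range(7))
    batch = 3
    for idx in range(0, len(places), batch):
        part = places[idx:idx + batch]
        print(part)   # [0, 1, 2], then [3, 4, 5], then [6]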
nominatim/indexer/progress.py
index 177e67b812aef0ea05116928c214ed5434f5a622..634b1fae703670d838bf1633a2d222b427dd43fa 100644
@@ -63,7 +63,7 @@ class ProgressLogger:
             places_per_sec = self.done_places
         else:
             diff_seconds = (rank_end_time - self.rank_start_time).total_seconds()
-            places_per_sec = self.done_places/diff_seconds
+            places_per_sec = self.done_places / diff_seconds
 
         LOG.warning("Done %d/%d in %d @ %.3f per second - FINISHED %s\n",
                     self.done_places, self.total_places, int(diff_seconds),
nominatim/indexer/runners.py
index aa607faae3f3d48988ebddd738a49c90ba4bb607..181de45a18af06a4792209c45d1278abcc35cdbc 100644
@@ -25,7 +25,7 @@ class AbstractPlacexRunner:
                    SET indexed_status = 0, address = v.addr, token_info = v.ti
                    FROM (VALUES {}) as v(id, addr, ti)
                    WHERE place_id = v.id
-               """.format(','.join(["(%s, %s::hstore, %s::jsonb)"]  * num_places))
+               """.format(','.join(["(%s, %s::hstore, %s::jsonb)"] * num_places))
 
 
     @staticmethod
@@ -124,7 +124,7 @@ class InterpolationRunner:
                    SET indexed_status = 0, address = v.addr, token_info = v.ti
                    FROM (VALUES {}) as v(id, addr, ti)
                    WHERE place_id = v.id
-               """.format(','.join(["(%s, %s::hstore, %s::jsonb)"]  * num_places))
+               """.format(','.join(["(%s, %s::hstore, %s::jsonb)"] * num_places))
 
 
     def index_places(self, worker, places):
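In both hunks the repeated-list join builds one placeholder group per place for a multi-row VALUES clause. For example:

    num_places = 2
    print(','.join(["(%s, %s::hstore, %s::jsonb)"] * num_places))
    # (%s, %s::hstore, %s::jsonb),(%s, %s::hstore, %s::jsonb)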
nominatim/tokenizer/legacy_icu_tokenizer.py
index 12ee0404f55d013cfe44c279d9578c2126ddc914..31e8c44b70352cbf633f13c230279f3a986cdca1 100644
@@ -341,7 +341,7 @@ class LegacyICUNameAnalyzer:
                 term = self.name_processor.get_search_normalized(word)
                 if term:
                     copystr.add(word, ' ' + term, cls, typ,
-                                oper if oper in ('in', 'near')  else None, 0)
+                                oper if oper in ('in', 'near') else None, 0)
                     added += 1
 
             copystr.copy_out(cursor, 'word',
nominatim/tokenizer/legacy_tokenizer.py
index 24af1c3a1a4013f035cac265bb116f644c53c8a0..a68b69ec070c150b4aa2673aa27b0218f2615494 100644
@@ -582,7 +582,7 @@ class _TokenCache:
         with conn.cursor() as cur:
             cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text
                            FROM generate_series(1, 100) as i""")
-            self._cached_housenumbers = {str(r[0]) : r[1] for r in cur}
+            self._cached_housenumbers = {str(r[0]): r[1] for r in cur}
 
         # For postcodes remember the ones that have already been added
         self.postcodes = set()
nominatim/tools/check_database.py
index d4f793b46334c27f304fbfe898268a4f23f564a9..d116554fea20f6e9b5e261adc2a48b0434fa5531 100644
@@ -24,6 +24,7 @@ def _check(hint=None):
     """
     def decorator(func):
         title = func.__doc__.split('\n', 1)[0].strip()
+
         def run_check(conn, config):
             print(title, end=' ... ')
             ret = func(conn, config)
@@ -98,13 +99,12 @@ def _get_indexes(conn):
     if conn.table_exists('place'):
         indexes.extend(('idx_placex_pendingsector',
                         'idx_location_area_country_place_id',
-                        'idx_place_osm_unique'
-                       ))
+                        'idx_place_osm_unique'))
 
     return indexes
 
 
-### CHECK FUNCTIONS
+# CHECK FUNCTIONS
 #
 # Functions are exectured in the order they appear here.
 
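The first hunk touches a decorator factory: _check() lifts the check's title from the wrapped function's docstring and prints it before running the check. A stripped-down sketch of the pattern (the real code's hint handling and result reporting are simplified here):

    def _check(hint=None):
        def decorator(func):
            title = func.__doc__.split('\n', 1)[0].strip()

            def run_check(conn, config):
                print(title, end=' ... ')
                ret = func(conn, config)
                print('OK' if ret else 'Failed')
                return ret

            return run_check
        return decorator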
nominatim/tools/database_import.py
index df82f9aaf4a6e042eae58e6dab378ed8cd422b3b..75483971538d07db42f8f792e3bb82b7d7155f92 100644
@@ -184,8 +184,10 @@ def truncate_data_tables(conn):
 
     conn.commit()
 
+
 _COPY_COLUMNS = 'osm_type, osm_id, class, type, name, admin_level, address, extratags, geometry'
 
+
 def load_data(dsn, threads):
     """ Copy data into the word and placex table.
     """
@@ -250,6 +252,7 @@ def create_search_indices(conn, config, drop=False):
 
     sql.run_sql_file(conn, 'indices.sql', drop=drop)
 
+
 def create_country_names(conn, tokenizer, languages=None):
     """ Add default country names to search index. `languages` is a comma-
         separated list of language codes as used in OSM. If `languages` is not
@@ -261,8 +264,7 @@ def create_country_names(conn, tokenizer, languages=None):
 
     def _include_key(key):
         return key == 'name' or \
-               (key.startswith('name:') \
-                and (not languages or key[5:] in languages))
+               (key.startswith('name:') and (not languages or key[5:] in languages))
 
     with conn.cursor() as cur:
         psycopg2.extras.register_hstore(cur)
@@ -271,7 +273,7 @@ def create_country_names(conn, tokenizer, languages=None):
 
         with tokenizer.name_analyzer() as analyzer:
             for code, name in cur:
-                names = {'countrycode' : code}
+                names = {'countrycode': code}
                 if code == 'gb':
                     names['short_name'] = 'UK'
                 if code == 'us':
nominatim/tools/exec_utils.py
index 560bb78166c54ba7859c5ea275522f8c70804f40..f91c56542759895b6d52d0a7a2f8ee6a2ceccd48 100644
@@ -136,7 +136,7 @@ def run_osm2pgsql(options):
 def get_url(url):
     """ Get the contents from the given URL and return it as a UTF-8 string.
     """
-    headers = {"User-Agent" : "Nominatim/{0[0]}.{0[1]}.{0[2]}-{0[3]}".format(NOMINATIM_VERSION)}
+    headers = {"User-Agent": "Nominatim/{0[0]}.{0[1]}.{0[2]}-{0[3]}".format(NOMINATIM_VERSION)}
 
     try:
         with urlrequest.urlopen(urlrequest.Request(url, headers=headers)) as response:
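The User-Agent header indexes into the version tuple from inside the format string; with a made-up version value:

    NOMINATIM_VERSION = (3, 7, 0, 0)  # hypothetical value for illustration
    print("Nominatim/{0[0]}.{0[1]}.{0[2]}-{0[3]}".format(NOMINATIM_VERSION))
    # Nominatim/3.7.0-0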
nominatim/tools/migration.py
index de1e51013ffa00e4528a12318327343e38ccd11b..d7faca31f1f431b61383825ad105bee4163a6a07 100644
@@ -142,7 +142,8 @@ def change_housenumber_transliteration(conn, **_):
                        BEGIN
                          SELECT array_to_string(array_agg(trans), ';')
                            INTO normtext
-                           FROM (SELECT lookup_word as trans, getorcreate_housenumber_id(lookup_word)
+                           FROM (SELECT lookup_word as trans,
+                                        getorcreate_housenumber_id(lookup_word)
                                  FROM (SELECT make_standard_name(h) as lookup_word
                                        FROM regexp_split_to_table(housenumber, '[,;]') h) x) y;
                          return normtext;
nominatim/tools/postcodes.py
index cfd242e24d8d358cf48f77f2930e8a1a7171ef03..3f8f4e382081b686af461cb412ca7a96f424f218 100644
@@ -165,11 +165,14 @@ def update_postcodes(dsn, project_dir, tokenizer):
             with conn.cursor(name="placex_postcodes") as cur:
                 cur.execute("""
                 SELECT cc as country_code, pc, ST_X(centroid), ST_Y(centroid)
-                FROM (SELECT 
-                        COALESCE(plx.country_code, get_country_code(ST_Centroid(pl.geometry))) as cc,
+                FROM (SELECT
+                        COALESCE(plx.country_code,
+                                 get_country_code(ST_Centroid(pl.geometry))) as cc,
                         token_normalized_postcode(pl.address->'postcode') as pc,
-                        ST_Centroid(ST_Collect(COALESCE(plx.centroid, ST_Centroid(pl.geometry)))) as centroid 
-                        FROM place AS pl LEFT OUTER JOIN placex AS plx ON pl.osm_id = plx.osm_id AND pl.osm_type = plx.osm_type
+                        ST_Centroid(ST_Collect(COALESCE(plx.centroid,
+                                                        ST_Centroid(pl.geometry)))) as centroid
+                      FROM place AS pl LEFT OUTER JOIN placex AS plx
+                             ON pl.osm_id = plx.osm_id AND pl.osm_type = plx.osm_type
                     WHERE pl.address ? 'postcode' AND pl.geometry IS NOT null
                     GROUP BY cc, pc) xx
                 WHERE pc IS NOT null AND cc IS NOT null
nominatim/tools/special_phrases/sp_importer.py
index 681990fa6175be2935f6b59e763fe4dab6239792..a26ea8a6201ae8b10891a85e457fbbe12778e902 100644
@@ -44,8 +44,8 @@ class SPImporter():
         # This set will contain all existing phrases to be added.
         # It contains tuples with the following format: (lable, class, type, operator)
         self.word_phrases = set()
-        #This set will contain all existing place_classtype tables which doesn't match any
-        #special phrases class/type on the wiki.
+        # This set will contain all existing place_classtype tables which doesn't match any
+        # special phrases class/type on the wiki.
         self.table_phrases_to_delete = set()
 
     def import_phrases(self, tokenizer, should_replace):
@@ -60,7 +60,7 @@ class SPImporter():
         LOG.warning('Special phrases importation starting')
         self._fetch_existing_place_classtype_tables()
 
-        #Store pairs of class/type for further processing
+        # Store pairs of class/type for further processing
         class_type_pairs = set()
 
         for loaded_phrases in self.sp_loader:
@@ -131,17 +131,17 @@ class SPImporter():
             Return the class/type pair corresponding to the phrase.
         """
 
-        #blacklisting: disallow certain class/type combinations
+        # blacklisting: disallow certain class/type combinations
         if phrase.p_class in self.black_list.keys() \
            and phrase.p_type in self.black_list[phrase.p_class]:
             return None
 
-        #whitelisting: if class is in whitelist, allow only tags in the list
+        # whitelisting: if class is in whitelist, allow only tags in the list
         if phrase.p_class in self.white_list.keys() \
            and phrase.p_type not in self.white_list[phrase.p_class]:
             return None
 
-        #sanity check, in case somebody added garbage in the wiki
+        # sanity check, in case somebody added garbage in the wiki
         if not self._check_sanity(phrase):
             self.statistics_handler.notify_one_phrase_invalid()
             return None
@@ -161,7 +161,7 @@ class SPImporter():
 
         sql_tablespace = self.config.TABLESPACE_AUX_DATA
         if sql_tablespace:
-            sql_tablespace = ' TABLESPACE '+sql_tablespace
+            sql_tablespace = ' TABLESPACE ' + sql_tablespace
 
         with self.db_connection.cursor() as db_cursor:
             db_cursor.execute("CREATE INDEX idx_placex_classtype ON placex (class, type)")
@@ -174,19 +174,19 @@ class SPImporter():
 
             if table_name in self.table_phrases_to_delete:
                 self.statistics_handler.notify_one_table_ignored()
-                #Remove this table from the ones to delete as it match a class/type
-                #still existing on the special phrases of the wiki.
+                # Remove this table from the ones to delete as it match a
+                # class/type still existing on the special phrases of the wiki.
                 self.table_phrases_to_delete.remove(table_name)
-                #So dont need to create the table and indexes.
+                # So don't need to create the table and indexes.
                 continue
 
-            #Table creation
+            # Table creation
             self._create_place_classtype_table(sql_tablespace, phrase_class, phrase_type)
 
-            #Indexes creation
+            # Indexes creation
             self._create_place_classtype_indexes(sql_tablespace, phrase_class, phrase_type)
 
-            #Grant access on read to the web user.
+            # Grant access on read to the web user.
             self._grant_access_to_webuser(phrase_class, phrase_type)
 
             self.statistics_handler.notify_one_table_created()
@@ -202,8 +202,8 @@ class SPImporter():
         table_name = _classtype_table(phrase_class, phrase_type)
         with self.db_connection.cursor() as db_cursor:
             db_cursor.execute(SQL("""
-                    CREATE TABLE IF NOT EXISTS {{}} {} 
-                    AS SELECT place_id AS place_id,st_centroid(geometry) AS centroid FROM placex 
+                    CREATE TABLE IF NOT EXISTS {{}} {}
+                    AS SELECT place_id AS place_id,st_centroid(geometry) AS centroid FROM placex
                     WHERE class = {{}} AND type = {{}}""".format(sql_tablespace))
                               .format(Identifier(table_name), Literal(phrase_class),
                                       Literal(phrase_type)))
@@ -215,7 +215,7 @@ class SPImporter():
         """
         index_prefix = 'idx_place_classtype_{}_{}_'.format(phrase_class, phrase_type)
         base_table = _classtype_table(phrase_class, phrase_type)
-        #Index on centroid
+        # Index on centroid
         if not self.db_connection.index_exists(index_prefix + 'centroid'):
             with self.db_connection.cursor() as db_cursor:
                 db_cursor.execute(SQL("""
@@ -223,7 +223,7 @@ class SPImporter():
                                   .format(Identifier(index_prefix + 'centroid'),
                                           Identifier(base_table)), sql_tablespace)
 
-        #Index on place_id
+        # Index on place_id
         if not self.db_connection.index_exists(index_prefix + 'place_id'):
             with self.db_connection.cursor() as db_cursor:
                 db_cursor.execute(SQL(
@@ -248,10 +248,12 @@ class SPImporter():
             Delete the place_classtype tables.
         """
         LOG.warning('Cleaning database...')
-        #Array containing all queries to execute. Contain tuples of format (query, parameters)
+        # Array containing all queries to execute.
+        # Contains tuples of format (query, parameters)
         queries_parameters = []
 
-        #Delete place_classtype tables corresponding to class/type which are not on the wiki anymore
+        # Delete place_classtype tables corresponding to class/type which
+        # are not on the wiki anymore.
         for table in self.table_phrases_to_delete:
             self.statistics_handler.notify_one_table_deleted()
             query = SQL('DROP TABLE IF EXISTS {}').format(Identifier(table))
@@ -271,7 +273,7 @@ class SPImporter():
         file, extension = os.path.splitext(file_path)
         json_file_path = Path(file + '.json').resolve()
 
-        if extension not in('.php', '.json'):
+        if extension not in ('.php', '.json'):
             raise UsageError('The custom NOMINATIM_PHRASE_CONFIG file has not a valid extension.')
 
         if extension == '.php' and not isfile(json_file_path):
nominatim/tools/special_phrases/sp_wiki_loader.py
index 914e15391123cf2571c99924a17d446c7bb8415c..1ad9de7e2cfa299dc18b68e873bf6f00c9592432 100644
@@ -15,7 +15,7 @@ class SPWikiLoader(Iterator):
     def __init__(self, config, languages=None):
         super().__init__()
         self.config = config
-        #Compile the regex here to increase performances.
+        # Compile the regex here to increase performances.
         self.occurence_pattern = re.compile(
             r'\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([\-YN])'
         )
@@ -35,7 +35,7 @@ class SPWikiLoader(Iterator):
             Parses XML content and extracts special phrases from it.
             Return a list of SpecialPhrase.
         """
-        #One match will be of format [label, class, type, operator, plural]
+        # One match will be of format [label, class, type, operator, plural]
         matches = self.occurence_pattern.findall(xml)
         returned_phrases = set()
         for match in matches:
@@ -65,5 +65,6 @@ class SPWikiLoader(Iterator):
             Requested URL Example :
                 https://wiki.openstreetmap.org/wiki/Special:Export/Nominatim/Special_Phrases/EN
         """
-        url = 'https://wiki.openstreetmap.org/wiki/Special:Export/Nominatim/Special_Phrases/' + lang.upper() # pylint: disable=line-too-long
+        url = 'https://wiki.openstreetmap.org/wiki/Special:Export/Nominatim/Special_Phrases/' \
+              + lang.upper()
         return get_url(url)
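The occurence_pattern compiled in the first hunk captures five fields from one row of the wiki's special-phrases table. A made-up row for illustration (trailing spaces in the captures are stripped later by SpecialPhrase):

    import re

    occurence_pattern = re.compile(
        r'\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([\-YN])')

    row = '| Zoo || tourism || zoo || - || N'
    print(occurence_pattern.findall(row))
    # [('Zoo ', 'tourism ', 'zoo ', '- ', 'N')]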
nominatim/tools/special_phrases/special_phrase.py
index 448fbee47b283717e80ba01d28897ff8a08635e4..da7968cac9c0845917f4593da1c41c3b1b9ac18d 100644
@@ -13,7 +13,7 @@ class SpecialPhrase():
     def __init__(self, p_label, p_class, p_type, p_operator):
         self.p_label = p_label.strip()
         self.p_class = p_class.strip()
-        #Hack around a bug where building=yes was imported with quotes into the wiki
+        # Hack around a bug where building=yes was imported with quotes into the wiki
         self.p_type = re.sub(r'\"|&quot;', '', p_type.strip())
-        #Needed if some operator in the wiki are not written in english
+        # Needed if some operator in the wiki are not written in english
         self.p_operator = '-' if p_operator not in ('near', 'in') else p_operator
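The re.sub() in this hunk removes both literal and HTML-encoded quotes from the type value; for example:

    import re

    print(re.sub(r'\"|&quot;', '', '"yes"'))            # yes
    print(re.sub(r'\"|&quot;', '', '&quot;yes&quot;'))  # yes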