git.openstreetmap.org Git - nominatim.git/commitdiff
Merge pull request #1902 from lonvia/avoid-touching-boundaries-in-addresses
author Sarah Hoffmann <lonvia@denofr.de>
Tue, 4 Aug 2020 12:30:08 +0000 (14:30 +0200)
committer GitHub <noreply@github.com>
Tue, 4 Aug 2020 12:30:08 +0000 (14:30 +0200)
Be more strict about what areas make up an address

nominatim/indexer/__init__.py [new file with mode: 0644]
nominatim/indexer/progress.py [new file with mode: 0644]
nominatim/nominatim.py

diff --git a/nominatim/indexer/__init__.py b/nominatim/indexer/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/nominatim/indexer/progress.py b/nominatim/indexer/progress.py
new file mode 100644 (file)
index 0000000..456d3ea
--- /dev/null
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file is part of Nominatim.
+# Copyright (C) 2020 Sarah Hoffmann
+
+import logging
+from datetime import datetime
+
+log = logging.getLogger()
+
+class ProgressLogger(object):
+    """ Tracks and prints progress for the indexing process.
+        `name` is the name of the indexing step being tracked.
+        `total` sets up the total number of items that need processing.
+        `log_interval` denotes the interval in seconds at which progress
+        should be reported.
+    """
+
+    def __init__(self, name, total, log_interval=1):
+        self.name = name
+        self.total_places = total
+        self.done_places = 0
+        self.rank_start_time = datetime.now()
+        self.log_interval = log_interval
+        self.next_info = 100 if log.isEnabledFor(logging.INFO) else total + 1
+
+    def add(self, num=1):
+        """ Mark `num` places as processed. Print a log message if the
+            logging is at least INFO and the log interval has passed.
+        """
+        self.done_places += num
+
+        if self.done_places >= self.next_info:
+            now = datetime.now()
+            done_time = (now - self.rank_start_time).total_seconds()
+            places_per_sec = self.done_places / done_time
+            eta = (self.total_places - self.done_places)/places_per_sec
+
+            log.info("Done {} in {} @ {:.3f} per second - {} ETA (seconds): {:.2f}"
+                     .format(self.done_places, int(done_time),
+                             places_per_sec, self.name, eta))
+
+            self.next_info += int(places_per_sec) * self.log_interval
+
+    def done(self):
+        """ Print final staticstics about the progress.
+        """
+        rank_end_time = datetime.now()
+        diff_seconds = (rank_end_time-self.rank_start_time).total_seconds()
+
+        log.warning("Done {}/{} in {} @ {:.3f} per second - FINISHED {}\n".format(
+                    self.done_places, self.total_places, int(diff_seconds),
+                    self.done_places/diff_seconds, self.name))
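
A minimal usage sketch of the new ProgressLogger (the step name, totals and
simulated workload below are illustrative, not part of the commit; like
nominatim.py, it assumes the nominatim/ directory is on the import path):

    import logging
    import time

    from indexer.progress import ProgressLogger

    logging.basicConfig(level=logging.INFO)

    progress = ProgressLogger('rank 26', total=500)
    for _ in range(10):        # pretend to process ten batches of 50 places
        time.sleep(0.1)        # stand-in for the actual indexing work
        progress.add(50)
    progress.done()
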
diff --git a/nominatim/nominatim.py b/nominatim/nominatim.py
index b29bf343e035e02cdfd62a43e0e0d3a01ebebcd5..e8600ca8dc41bb69cd19491c8a2b5abc01c302d1 100755 (executable)
@@ -32,6 +32,8 @@ import psycopg2
 from psycopg2.extras import wait_select
 import select
 
+from indexer.progress import ProgressLogger
+
 log = logging.getLogger()
 
 def make_connection(options, asynchronous=False):
@@ -55,24 +57,19 @@ class RankRunner(object):
     def name(self):
         return "rank {}".format(self.rank)
 
-    def sql_index_sectors(self):
-        return """SELECT geometry_sector, count(*) FROM placex
+    def sql_count_objects(self):
+        return """SELECT count(*) FROM placex
                   WHERE rank_search = {} and indexed_status > 0
-                  GROUP BY geometry_sector
-                  ORDER BY geometry_sector""".format(self.rank)
+               """.format(self.rank)
 
-    def sql_nosector_places(self):
+    def sql_get_objects(self):
         return """SELECT place_id FROM placex
                   WHERE indexed_status > 0 and rank_search = {}
                   ORDER BY geometry_sector""".format(self.rank)
 
-    def sql_sector_places(self):
-        return """SELECT place_id FROM placex
-                  WHERE indexed_status > 0 and rank_search = {}
-                        and geometry_sector = %s""".format(self.rank)
-
-    def sql_index_place(self):
-        return "UPDATE placex SET indexed_status = 0 WHERE place_id = %s"
+    def sql_index_place(self, ids):
+        return "UPDATE placex SET indexed_status = 0 WHERE place_id IN ({})"\
+               .format(','.join((str(i) for i in ids)))
 
 
 class InterpolationRunner(object):
@@ -83,25 +80,19 @@ class InterpolationRunner(object):
     def name(self):
         return "interpolation lines (location_property_osmline)"
 
-    def sql_index_sectors(self):
-        return """SELECT geometry_sector, count(*) FROM location_property_osmline
-                  WHERE indexed_status > 0
-                  GROUP BY geometry_sector
-                  ORDER BY geometry_sector"""
+    def sql_count_objects(self):
+        return """SELECT count(*) FROM location_property_osmline
+                  WHERE indexed_status > 0"""
 
-    def sql_nosector_places(self):
+    def sql_get_objects(self):
         return """SELECT place_id FROM location_property_osmline
                   WHERE indexed_status > 0
                   ORDER BY geometry_sector"""
 
-    def sql_sector_places(self):
-        return """SELECT place_id FROM location_property_osmline
-                  WHERE indexed_status > 0 and geometry_sector = %s
-                  ORDER BY geometry_sector"""
-
-    def sql_index_place(self):
+    def sql_index_place(self, ids):
         return """UPDATE location_property_osmline
-                  SET indexed_status = 0 WHERE place_id = %s"""
+                  SET indexed_status = 0 WHERE place_id IN ({})"""\
+               .format(','.join((str(i) for i in ids)))
 
 
 class DBConnection(object):
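
For illustration (the place ids are invented), sql_index_place now renders a
whole batch into one IN list instead of issuing one UPDATE per place:

    RankRunner(26).sql_index_place([12, 34, 56])
    # -> 'UPDATE placex SET indexed_status = 0 WHERE place_id IN (12,34,56)'

Formatting the ids directly into the statement is acceptable only because
they are integer place_id values read back from the database itself; ids
from an untrusted source would need parameter binding instead.
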
@@ -210,83 +201,48 @@ class Indexer(object):
             self.index(RankRunner(rank))
 
         if self.maxrank == 30:
-            self.index(InterpolationRunner())
+            self.index(InterpolationRunner(), 20)
 
-        self.index(RankRunner(self.maxrank))
+        self.index(RankRunner(self.maxrank), 20)
 
-    def index(self, obj):
+    def index(self, obj, batch=1):
         """ Index a single rank or table. `obj` describes the SQL to use
-            for indexing.
+            for indexing. `batch` describes the number of objects that
+            should be processed with a single SQL statement.
         """
         log.warning("Starting {}".format(obj.name()))
 
-        cur = self.conn.cursor(name='main')
-        cur.execute(obj.sql_index_sectors())
+        cur = self.conn.cursor()
+        cur.execute(obj.sql_count_objects())
 
-        total_tuples = 0
-        for r in cur:
-            total_tuples += r[1]
-        log.debug("Total number of rows; {}".format(total_tuples))
+        total_tuples = cur.fetchone()[0]
+        log.debug("Total number of rows: {}".format(total_tuples))
 
-        cur.scroll(0, mode='absolute')
+        cur.close()
 
         next_thread = self.find_free_thread()
-        done_tuples = 0
-        rank_start_time = datetime.now()
-
-        sector_sql = obj.sql_sector_places()
-        index_sql = obj.sql_index_place()
-        min_grouped_tuples = total_tuples - len(self.threads) * 1000
+        progress = ProgressLogger(obj.name(), total_tuples)
 
-        next_info = 100 if log.isEnabledFor(logging.INFO) else total_tuples + 1
+        cur = self.conn.cursor(name='places')
+        cur.execute(obj.sql_get_objects())
 
-        for r in cur:
-            sector = r[0]
-
-            # Should we do the remaining ones together?
-            do_all = done_tuples > min_grouped_tuples
-
-            pcur = self.conn.cursor(name='places')
-
-            if do_all:
-                pcur.execute(obj.sql_nosector_places())
-            else:
-                pcur.execute(sector_sql, (sector, ))
-
-            for place in pcur:
-                place_id = place[0]
-                log.debug("Processing place {}".format(place_id))
-                thread = next(next_thread)
-
-                thread.perform(index_sql, (place_id,))
-                done_tuples += 1
+        while True:
+            places = [p[0] for p in cur.fetchmany(batch)]
+            if len(places) == 0:
+                break
 
-                if done_tuples >= next_info:
-                    now = datetime.now()
-                    done_time = (now - rank_start_time).total_seconds()
-                    tuples_per_sec = done_tuples / done_time
-                    log.info("Done {} in {} @ {:.3f} per second - {} ETA (seconds): {:.2f}"
-                           .format(done_tuples, int(done_time),
-                                   tuples_per_sec, obj.name(),
-                                   (total_tuples - done_tuples)/tuples_per_sec))
-                    next_info += int(tuples_per_sec)
+            log.debug("Processing places: {}".format(places))
+            thread = next(next_thread)
 
-            pcur.close()
-
-            if do_all:
-                break
+            thread.perform(obj.sql_index_place(places))
+            progress.add(len(places))
 
         cur.close()
 
         for t in self.threads:
             t.wait()
 
-        rank_end_time = datetime.now()
-        diff_seconds = (rank_end_time-rank_start_time).total_seconds()
-
-        log.warning("Done {}/{} in {} @ {:.3f} per second - FINISHED {}\n".format(
-                 done_tuples, total_tuples, int(diff_seconds),
-                 done_tuples/diff_seconds, obj.name()))
+        progress.done()
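
A standalone, single-threaded sketch of the fetch/dispatch loop above (the
database name and batch size are assumed for illustration; the real code
hands each batch to one of several asynchronous worker connections):

    import psycopg2

    conn = psycopg2.connect(dbname='nominatim')

    cur = conn.cursor(name='places')  # server-side cursor, rows are streamed
    cur.execute("""SELECT place_id FROM placex
                   WHERE indexed_status > 0 and rank_search = 26
                   ORDER BY geometry_sector""")

    while True:
        places = [row[0] for row in cur.fetchmany(20)]  # one batch of ids
        if not places:
            break
        # one UPDATE round-trip per batch instead of one per place
        with conn.cursor() as update_cur:
            update_cur.execute(
                "UPDATE placex SET indexed_status = 0 WHERE place_id IN ({})"
                .format(','.join(str(i) for i in places)))

    cur.close()
    conn.commit()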
 
     def find_free_thread(self):
         """ Generator that returns the next connection that is free for