indexer: get rid of special handling of few places
[nominatim.git] / nominatim / nominatim.py
index 54d9b2085db36c3e5fe840228253a45d690ff2e8..f87203af36dfee91a818e4b8723d41436c11ccb1 100755 (executable)
--- a/nominatim/nominatim.py
+++ b/nominatim/nominatim.py
@@ -35,9 +35,14 @@ import select
 log = logging.getLogger()
 
 def make_connection(options, asynchronous=False):
-    return psycopg2.connect(dbname=options.dbname, user=options.user,
-                            password=options.password, host=options.host,
-                            port=options.port, async_=asynchronous)
+    params = {'dbname' : options.dbname,
+              'user' : options.user,
+              'password' : options.password,
+              'host' : options.host,
+              'port' : options.port,
+              'async' : asynchronous}
+
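+    # The parameters are passed as a dict so that 'async' can be used as a
+    # key: 'async' is a reserved word in Python 3.7+ and therefore cannot be
+    # given as a literal keyword argument, while the 'async_' alias only
+    # exists in psycopg2 2.7 and later.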
+    return psycopg2.connect(**params)
 
 
 class RankRunner(object):
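make_connection() serves both kinds of connections the indexer works with: a
plain blocking connection for reading the list of places to process, and the
non-blocking worker connections that are polled via select(). A minimal usage
sketch (assumed from how the connections are used further down, not part of
the change itself):

    options = nominatim_arg_parser().parse_args()
    reader = make_connection(options)                     # blocking, fetches the work list
    worker = make_connection(options, asynchronous=True)  # non-blocking, driven by wait_select()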
@@ -104,19 +109,48 @@ class DBConnection(object):
     """
 
     def __init__(self, options):
+        self.current_query = None
+        self.current_params = None
+        self.options = options
+
+        self.conn = None
+        self.connect()
+
+    def connect(self):
+        if self.conn is not None:
+            self.cursor.close()
+            self.conn.close()
+
-        self.conn = make_connection(options, asynchronous=True)
+        self.conn = make_connection(self.options, asynchronous=True)
         self.wait()
 
         self.cursor = self.conn.cursor()
-
-        self.current_query = None
-        self.current_params = None
+        # Disable JIT and parallel workers as they are known to cause problems.
+        # Update pg_settings instead of using SET because it does not yield
+        # errors on older versions of Postgres where the settings are not
+        # implemented.
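+        # (Updating a row of pg_settings is equivalent to issuing SET for
+        # that parameter, so the change only affects the current session.)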
+        self.perform(
+            """ UPDATE pg_settings SET setting = -1 WHERE name = 'jit_above_cost';
+                UPDATE pg_settings SET setting = 0 
+                   WHERE name = 'max_parallel_workers_per_gather';""")
+        self.wait()
 
     def wait(self):
         """ Block until any pending operation is done.
         """
-        wait_select(self.conn)
-        self.current_query = None
+        while True:
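+            # Asynchronous psycopg2 connections operate in autocommit mode,
+            # so a deadlocked statement can simply be sent again and waited
+            # on without any further transaction handling.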
+            try:
+                wait_select(self.conn)
+                self.current_query = None
+                return
+            except psycopg2.extensions.TransactionRollbackError as e:
+                if e.pgcode == '40P01':
+                    log.info("Deadlock detected (params = {}), retry."
+                              .format(self.current_params))
+                    self.cursor.execute(self.current_query, self.current_params)
+                else:
+                    raise
+            except psycopg2.errors.DeadlockDetected:
+                self.cursor.execute(self.current_query, self.current_params)
 
     def perform(self, sql, args=None):
         """ Send SQL query to the server. Returns immediately without
@@ -150,6 +184,8 @@ class DBConnection(object):
                 self.cursor.execute(self.current_query, self.current_params)
             else:
                 raise
+        except psycopg2.errors.DeadlockDetected:
+            self.cursor.execute(self.current_query, self.current_params)
 
         return False
 
@@ -198,31 +234,22 @@ class Indexer(object):
         done_tuples = 0
         rank_start_time = datetime.now()
 
-        sector_sql = obj.sql_sector_places()
-        index_sql = obj.sql_index_place()
-        min_grouped_tuples = total_tuples - len(self.threads) * 1000
 
         next_info = 100 if log.isEnabledFor(logging.INFO) else total_tuples + 1
 
+        pcur = self.conn.cursor()
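+        # The same client-side cursor is reused to fetch the places of every
+        # sector below.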
+
         for r in cur:
             sector = r[0]
-
-            # Should we do the remaining ones together?
-            do_all = done_tuples > min_grouped_tuples
-
-            pcur = self.conn.cursor(name='places')
-
-            if do_all:
-                pcur.execute(obj.sql_nosector_places())
-            else:
-                pcur.execute(sector_sql, (sector, ))
+            pcur.execute(obj.sql_sector_places(), (sector, ))
 
             for place in pcur:
                 place_id = place[0]
                 log.debug("Processing place {}".format(place_id))
                 thread = next(next_thread)
 
-                thread.perform(index_sql, (place_id,))
+                thread.perform(obj.sql_index_place(), (place_id,))
                 done_tuples += 1
 
                 if done_tuples >= next_info:
@@ -235,11 +262,7 @@ class Indexer(object):
                                    (total_tuples - done_tuples)/tuples_per_sec))
                     next_info += int(tuples_per_sec)
 
-            pcur.close()
-
-            if do_all:
-                break
-
+        pcur.close()
         cur.close()
 
         for t in self.threads:
@@ -257,15 +280,27 @@ class Indexer(object):
             sending a query.
         """
         ready = self.threads
+        command_stat = 0
 
         while True:
             for thread in ready:
                 if thread.is_done():
+                    command_stat += 1
                     yield thread
 
-            ready, _, _ = select.select(self.threads, [], [])
+            # Refresh the connections occasionally to avoid potential
+            # memory leaks in PostgreSQL.
+            if command_stat > 100000:
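+                # Let every thread finish its pending query before its
+                # connection is closed and reopened.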
+                for t in self.threads:
+                    while not t.is_done():
+                        t.wait()
+                    t.connect()
+                command_stat = 0
+                ready = self.threads
+            else:
+                ready, _, _ = select.select(self.threads, [], [])
 
-        assert(False, "Unreachable code")
+        assert False, "Unreachable code"
 
 
 def nominatim_arg_parser():