# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.


"""
The PostgreSQL connector is a connectivity layer between the OpenERP code and
the database, *not* a database abstraction toolkit. Database abstraction is
the job of the ORM.
"""

from contextlib import contextmanager
from functools import wraps
import itertools
import logging
import time
import uuid
import warnings

from decorator import decorator
import psycopg2
import psycopg2.extras
import psycopg2.extensions
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ
from psycopg2.pool import PoolError
from werkzeug import urls

from odoo.api import Environment

psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)

_logger = logging.getLogger(__name__)

def unbuffer(symb, cr):
    if symb is None:
        return None
    return str(symb)

def undecimalize(symb, cr):
    if symb is None:
        return None
    return float(symb)

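# OIDs 700, 701 and 1700 are PostgreSQL's float4, float8 and numeric types;
# registering undecimalize makes NUMERIC columns come back as Python floats
# instead of decimal.Decimal objects (trading exactness for convenience).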
psycopg2.extensions.register_type(psycopg2.extensions.new_type((700, 701, 1700,), 'float', undecimalize))


from . import tools
from .tools.func import frame_codeinfo

from .tools import parse_version as pv
if pv(psycopg2.__version__) < pv('2.7'):
    from psycopg2._psycopg import QuotedString
    def adapt_string(adapted):
        """Python implementation of psycopg/psycopg2#459 from v2.7"""
        if '\x00' in adapted:
            raise ValueError("A string literal cannot contain NUL (0x00) characters.")
        return QuotedString(adapted)

    psycopg2.extensions.register_adapter(str, adapt_string)

from datetime import timedelta
import threading
from inspect import currentframe


def flush_env(cr, *, clear=True):
    """ Retrieve and flush an environment corresponding to the given cursor.
        Also clear the environment if ``clear`` is true.
    """
    env_to_flush = None
    for env in list(Environment.envs):
        # don't flush() on another cursor or with a RequestUID
        if env.cr is cr and (isinstance(env.uid, int) or env.uid is None):
            env_to_flush = env
            if env.uid is not None:
                break               # prefer an environment with a real uid

    if env_to_flush is not None:
        env_to_flush['base'].flush()
        if clear:
            env_to_flush.clear()    # clear remaining new records to compute

def clear_env(cr):
    """ Retrieve and clear an environment corresponding to the given cursor """
    for env in list(Environment.envs):
        if env.cr is cr:
            env.clear()
            break

import re
re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$')
re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$')
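# The two patterns above extract the target table name from a query for the
# SQL stats log, e.g. re_from.match('select * from "res_partner" where id = 1')
# captures 'res_partner' in group(1).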

sql_counter = 0


@decorator
def check(f, self, *args, **kwargs):
    """ Wrap a cursor method that cannot be called when the cursor is closed. """
    if self._closed:
        raise psycopg2.OperationalError('Unable to use a closed cursor.')
    return f(self, *args, **kwargs)


class BaseCursor:
    """ Base class for cursors that manage pre/post commit hooks. """

    def __init__(self):
        self.precommit = tools.Callbacks()
        self.postcommit = tools.Callbacks()
        self.prerollback = tools.Callbacks()
        self.postrollback = tools.Callbacks()

    @contextmanager
    @check
    def savepoint(self, flush=True):
        """ Context manager entering a new savepoint. """
        name = uuid.uuid1().hex
        if flush:
            flush_env(self, clear=False)
            self.precommit.run()
        self.execute('SAVEPOINT "%s"' % name)
        try:
            yield
            if flush:
                flush_env(self, clear=False)
                self.precommit.run()
        except Exception:
            if flush:
                clear_env(self)
                self.precommit.clear()
            self.execute('ROLLBACK TO SAVEPOINT "%s"' % name)
            raise
        else:
            self.execute('RELEASE SAVEPOINT "%s"' % name)
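
    # Illustrative usage of savepoint() (table and values are hypothetical):
    # a failing block only rolls back to the savepoint, not the whole
    # transaction.
    #
    #     with cr.savepoint():
    #         cr.execute("UPDATE res_partner SET name = %s WHERE id = %s",
    #                    ('Azure', 1))   # undone if the block raises
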
    def __enter__(self):
        """ Using the cursor as a contextmanager automatically commits and
            closes it::

                with cr:
                    cr.execute(...)

                # cr is committed if no failure occurred
                # cr is closed in any case
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            self.commit()
        self.close()


class Cursor(BaseCursor):
    """Represents an open transaction to the PostgreSQL DB backend,
       acting as a lightweight wrapper around psycopg2's
       ``cursor`` objects.

        ``Cursor`` is the object behind the ``cr`` variable used all
        over the OpenERP code.

        .. rubric:: Transaction Isolation

        One very important property of database transactions is the
        level of isolation between concurrent transactions.
        The SQL standard defines four levels of transaction isolation,
        ranging from the most strict *Serializable* level, to the least
        strict *Read Uncommitted* level. These levels are defined in
        terms of the phenomena that must not occur between concurrent
        transactions, such as *dirty read*, etc.
        In the context of a generic business data management software
        such as OpenERP, we need the best guarantees that no data
        corruption can ever be caused by simply running multiple
        transactions in parallel. Therefore, the preferred level would
        be the *serializable* level, which ensures that a set of
        transactions is guaranteed to produce the same effect as
        running them one at a time in some order.

        However, most database management systems implement a limited
        serializable isolation in the form of
        `snapshot isolation <http://en.wikipedia.org/wiki/Snapshot_isolation>`_,
        providing most of the same advantages as true serializability,
        with a fraction of the performance cost.
        With PostgreSQL up to version 9.0, this snapshot isolation was
        the implementation of both the ``REPEATABLE READ`` and
        ``SERIALIZABLE`` levels of the SQL standard.
        As of PostgreSQL 9.1, the previous snapshot isolation implementation
        was kept for ``REPEATABLE READ``, while a new ``SERIALIZABLE``
        level was introduced, providing some additional heuristics to
        detect a concurrent update by parallel transactions, and forcing
        one of them to roll back.

        OpenERP implements its own level of locking protection
        for transactions that are highly likely to provoke concurrent
        updates, such as stock reservations or document sequences updates.
        Therefore we mostly care about the properties of snapshot isolation,
        but we don't really need additional heuristics to trigger transaction
        rollbacks, as we are taking care of triggering instant rollbacks
        ourselves when it matters (and we can save the additional performance
        hit of these heuristics).

        As a result of the above, we have selected ``REPEATABLE READ`` as
        the default transaction isolation level for OpenERP cursors, as
        it will be mapped to the desired ``snapshot isolation`` level for
        all supported PostgreSQL versions (8.3 - 9.x).

        Note: up to psycopg2 v2.4.2, psycopg2 itself remapped the repeatable
        read level to serializable before sending it to the database, so it would
        actually select the new serializable mode on PostgreSQL 9.1. Make
        sure you use psycopg2 v2.4.2 or newer if you use PostgreSQL 9.1 and
        the performance hit is a concern for you.

        .. attribute:: cache

            Cache dictionary with a "request" (-ish) lifecycle: it only lives
            as long as the cursor itself does, and is proactively cleared when
            the cursor is closed.

            This cache should *only* be used to store repeatable reads, as it
            ignores rollbacks and savepoints; it should not be used to store
            *any* data which may be modified during the life of the cursor.

    """
    IN_MAX = 1000   # decent limit on size of IN queries - guideline = Oracle limit

    def __init__(self, pool, dbname, dsn, serialized=True):
        super().__init__()

        self.sql_from_log = {}
        self.sql_into_log = {}

        # default log level determined at cursor creation, could be
        # overridden later for debugging purposes
        self.sql_log = _logger.isEnabledFor(logging.DEBUG)

        self.sql_log_count = 0

        # avoid the call of close() (by __del__) if an exception
        # is raised by any of the following initialisations
        self._closed = True

        self.__pool = pool
        self.dbname = dbname
        # Whether to enable snapshot isolation level for this cursor.
        # see also the docstring of Cursor.
        self._serialized = serialized

        self._cnx = pool.borrow(dsn)
        self._obj = self._cnx.cursor()
        if self.sql_log:
            self.__caller = frame_codeinfo(currentframe(), 2)
        else:
            self.__caller = False
        self._closed = False   # real initialisation value
        self.autocommit(False)

        self._default_log_exceptions = True

        self.cache = {}

    def __build_dict(self, row):
        return {d.name: row[i] for i, d in enumerate(self._obj.description)}
    def dictfetchone(self):
        row = self._obj.fetchone()
        return row and self.__build_dict(row)
    def dictfetchmany(self, size):
        return [self.__build_dict(row) for row in self._obj.fetchmany(size)]
    def dictfetchall(self):
        return [self.__build_dict(row) for row in self._obj.fetchall()]
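
    # Illustrative example for the dict* helpers above (query is hypothetical):
    # after cr.execute("SELECT id, login FROM res_users"), cr.dictfetchall()
    # returns rows keyed by column name, e.g. [{'id': 1, 'login': 'admin'}, ...],
    # instead of plain tuples.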

    def __del__(self):
        if not self._closed and not self._cnx.closed:
            # Oops. 'self' has not been closed explicitly.
            # The cursor will be deleted by the garbage collector,
            # but the database connection is not put back into the connection
            # pool, preventing some operations on the database, like dropping it.
            # This can also lead to a server overload.
            msg = "Cursor not closed explicitly\n"
            if self.__caller:
                msg += "Cursor was created at %s:%s" % self.__caller
            else:
                msg += "Please enable sql debugging to trace the caller."
            _logger.warning(msg)
            self._close(True)

    @check
    def execute(self, query, params=None, log_exceptions=None):
        if params and not isinstance(params, (tuple, list, dict)):
            # psycopg2's TypeError is not clear if you mess up the params
            raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,))

        if self.sql_log:
            encoding = psycopg2.extensions.encodings[self.connection.encoding]
            _logger.debug("query: %s", self._obj.mogrify(query, params).decode(encoding, 'replace'))
        now = time.time()
        try:
            params = params or None
            res = self._obj.execute(query, params)
        except Exception as e:
            if self._default_log_exceptions if log_exceptions is None else log_exceptions:
                _logger.error("bad query: %s\nERROR: %s", tools.ustr(self._obj.query or query), e)
            raise

        # simple query count is always computed
        self.sql_log_count += 1
        delay = (time.time() - now)
        if hasattr(threading.current_thread(), 'query_count'):
            threading.current_thread().query_count += 1
            threading.current_thread().query_time += delay

        # advanced stats only if sql_log is enabled
        if self.sql_log:
            delay *= 1E6

            query_lower = self._obj.query.decode().lower()
            res_from = re_from.match(query_lower)
            if res_from:
                self.sql_from_log.setdefault(res_from.group(1), [0, 0])
                self.sql_from_log[res_from.group(1)][0] += 1
                self.sql_from_log[res_from.group(1)][1] += delay
            res_into = re_into.match(query_lower)
            if res_into:
                self.sql_into_log.setdefault(res_into.group(1), [0, 0])
                self.sql_into_log[res_into.group(1)][0] += 1
                self.sql_into_log[res_into.group(1)][1] += delay
        return res

    def split_for_in_conditions(self, ids, size=None):
        """Split a list of identifiers into one or more smaller tuples
           safe for IN conditions."""
        return tools.misc.split_every(size or self.IN_MAX, ids)
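
    # Illustrative usage sketch (table and ids are hypothetical): chunk large
    # id lists so each query stays within IN_MAX parameters.
    #
    #     for sub_ids in cr.split_for_in_conditions(ids):
    #         cr.execute("SELECT id FROM res_partner WHERE id IN %s", [sub_ids])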

    def print_log(self):
        global sql_counter

        if not self.sql_log:
            return

        def process(kind):
            sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log}
            total = 0
            if sqllogs[kind]:
                sqllogitems = sqllogs[kind].items()
                _logger.debug("SQL LOG %s:", kind)
                for r in sorted(sqllogitems, key=lambda k: k[1]):
                    delay = timedelta(microseconds=r[1][1])
                    _logger.debug("table: %s: %s/%s", r[0], delay, r[1][0])
                    total += r[1][1]
            total = timedelta(microseconds=total)
            _logger.debug("SUM %s:%s/%d [%d]", kind, total, self.sql_log_count, sql_counter)
            sqllogs[kind].clear()

        process('from')
        process('into')
        self.sql_log_count = 0
        self.sql_log = False

    @check
    def close(self):
        return self._close(False)

    def _close(self, leak=False):
        global sql_counter

        if not self._obj:
            return

        del self.cache

        # simple query count is always computed
        sql_counter += self.sql_log_count

        # advanced stats only if sql_log is enabled
        self.print_log()

        self._obj.close()

        # This forces the cursor to be freed, and thus, available again. It is
        # important because otherwise we can overload the server very easily
        # because of a cursor shortage (because cursors are not garbage
        # collected as fast as they should). The problem is probably due in
        # part to browse records keeping a reference to the cursor.
        del self._obj

        # Clean the underlying connection, and run rollback hooks.
        self.rollback()

        self._closed = True

        if leak:
            self._cnx.leaked = True
        else:
            chosen_template = tools.config['db_template']
            templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
            keep_in_pool = self.dbname not in templates_list
            self.__pool.give_back(self._cnx, keep_in_pool=keep_in_pool)

    @check
    def autocommit(self, on):
        if on:
            isolation_level = ISOLATION_LEVEL_AUTOCOMMIT
        else:
            # If a serializable cursor was requested, we
            # use the appropriate PostgreSQL isolation level
            # that maps to snapshot isolation.
            # For all supported PostgreSQL versions (8.3-9.x),
            # this is currently ISOLATION_LEVEL_REPEATABLE_READ.
            # See also the docstring of this class.
            # NOTE: up to psycopg2 2.4.2, repeatable read
            #       is remapped to serializable before being
            #       sent to the database, so it is in fact
            #       unavailable for use with pg 9.1.
            isolation_level = \
                ISOLATION_LEVEL_REPEATABLE_READ \
                if self._serialized \
                else ISOLATION_LEVEL_READ_COMMITTED
        self._cnx.set_isolation_level(isolation_level)

    @check
    def after(self, event, func):
        """ Register an event handler.

            :param event: the event, either `'commit'` or `'rollback'`
            :param func: a callable object, called with no argument after the
                event occurs

            Be careful when coding an event handler: any operation performed on
            the cursor after the commit/rollback takes place in the next
            transaction, which has already begun and may still be committed or
            rolled back independently. You may consider the use of a dedicated
            temporary cursor to do some database operation.
        """
        warnings.warn(
            "Cursor.after() is deprecated, use Cursor.postcommit.add() instead.",
            DeprecationWarning,
        )
        if event == 'commit':
            self.postcommit.add(func)
        elif event == 'rollback':
            self.postrollback.add(func)

    @check
    def commit(self):
        """ Perform an SQL `COMMIT` """
        flush_env(self)
        self.precommit.run()
        result = self._cnx.commit()
        self.prerollback.clear()
        self.postrollback.clear()
        self.postcommit.run()
        return result

    @check
    def rollback(self):
        """ Perform an SQL `ROLLBACK` """
        clear_env(self)
        self.precommit.clear()
        self.postcommit.clear()
        self.prerollback.run()
        result = self._cnx.rollback()
        self.postrollback.run()
        return result
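
    # Illustrative hook usage sketch (the callback is hypothetical): register
    # a function to run only once the transaction is really committed.
    #
    #     cr.postcommit.add(lambda: _logger.info("transaction committed"))
    #     cr.commit()   # runs precommit hooks, COMMITs, then postcommit hooks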

    @check
    def __getattr__(self, name):
        return getattr(self._obj, name)

    @property
    def closed(self):
        return self._closed


class TestCursor(BaseCursor):
    """ A pseudo-cursor to be used for tests, on top of a real cursor. It keeps
        the transaction open across requests, and simulates committing, rolling
        back, and closing:

              test cursor           | queries on actual cursor
            ------------------------+---------------------------------------
              cr = TestCursor(...)  | SAVEPOINT test_cursor_N
                                    |
              cr.execute(query)     | query
                                    |
              cr.commit()           | SAVEPOINT test_cursor_N
                                    |
              cr.rollback()         | ROLLBACK TO SAVEPOINT test_cursor_N
                                    |
              cr.close()            | ROLLBACK TO SAVEPOINT test_cursor_N
                                    |

    """
    _savepoint_seq = itertools.count()

    def __init__(self, cursor, lock):
        super().__init__()
        self._closed = False
        self._cursor = cursor
        # we use a lock to serialize concurrent requests
        self._lock = lock
        self._lock.acquire()
        # in order to simulate commit and rollback, the cursor maintains a
        # savepoint at its last commit
        self._savepoint = "test_cursor_%s" % next(self._savepoint_seq)
        self._cursor.execute('SAVEPOINT "%s"' % self._savepoint)

    def close(self):
        if not self._closed:
            self.rollback()
            self._closed = True
            self._lock.release()

    def autocommit(self, on):
        _logger.debug("TestCursor.autocommit(%r) does nothing", on)

    @check
    def commit(self):
        """ Simulate an SQL `COMMIT` by moving the savepoint """
        flush_env(self)
        self.precommit.run()
        self._cursor.execute('SAVEPOINT "%s"' % self._savepoint)
        self.prerollback.clear()
        self.postrollback.clear()
        self.postcommit.clear()         # TestCursor ignores post-commit hooks

    @check
    def rollback(self):
        """ Simulate an SQL `ROLLBACK` by rolling back to the savepoint """
        clear_env(self)
        self.precommit.clear()
        self.postcommit.clear()
        self.prerollback.run()
        self._cursor.execute('ROLLBACK TO SAVEPOINT "%s"' % self._savepoint)
        self.postrollback.run()

    def __getattr__(self, name):
        value = getattr(self._cursor, name)
        if callable(value) and self._closed:
            raise psycopg2.OperationalError('Unable to use a closed cursor.')
        return value


class PsycoConnection(psycopg2.extensions.connection):
    pass

class ConnectionPool(object):
    """ The pool of connections to database(s)

        Keep a set of connections to pg databases open, and reuse them
        to open cursors for all transactions.

        The connections are *not* automatically closed. Only a close_db()
        can trigger that.
    """

    def locked(fun):
        @wraps(fun)
        def _locked(self, *args, **kwargs):
            self._lock.acquire()
            try:
                return fun(self, *args, **kwargs)
            finally:
                self._lock.release()
        return _locked

    def __init__(self, maxconn=64):
        self._connections = []
        self._maxconn = max(maxconn, 1)
        self._lock = threading.Lock()

    def __repr__(self):
        used = len([1 for c, u in self._connections[:] if u])
        count = len(self._connections)
        return "ConnectionPool(used=%d/count=%d/max=%d)" % (used, count, self._maxconn)

    def _debug(self, msg, *args):
        _logger.debug(('%r ' + msg), self, *args)

    @locked
    def borrow(self, connection_info):
        """
        :param dict connection_info: dict of psql connection keywords
        :rtype: PsycoConnection
        """
        # free dead and leaked connections
        for i, (cnx, _) in tools.reverse_enumerate(self._connections):
            if cnx.closed:
                self._connections.pop(i)
                self._debug('Removing closed connection at index %d: %r', i, cnx.dsn)
                continue
            if getattr(cnx, 'leaked', False):
                delattr(cnx, 'leaked')
                self._connections.pop(i)
                self._connections.append((cnx, False))
                _logger.info('%r: Free leaked connection to %r', self, cnx.dsn)

        for i, (cnx, used) in enumerate(self._connections):
            if not used and cnx._original_dsn == connection_info:
                try:
                    cnx.reset()
                except psycopg2.OperationalError:
                    self._debug('Cannot reset connection at index %d: %r', i, cnx.dsn)
                    # psycopg2 2.4.4 and earlier do not allow closing a closed connection
                    if not cnx.closed:
                        cnx.close()
                    continue
                self._connections.pop(i)
                self._connections.append((cnx, True))
                self._debug('Borrow existing connection to %r at index %d', cnx.dsn, i)

                return cnx

        if len(self._connections) >= self._maxconn:
            # try to remove the oldest connection not used
            for i, (cnx, used) in enumerate(self._connections):
                if not used:
                    self._connections.pop(i)
                    if not cnx.closed:
                        cnx.close()
                    self._debug('Removing old connection at index %d: %r', i, cnx.dsn)
                    break
            else:
                # note: this code is called only if the for loop has completed (no break)
                raise PoolError('The Connection Pool Is Full')

        try:
            result = psycopg2.connect(
                connection_factory=PsycoConnection,
                **connection_info)
        except psycopg2.Error:
            _logger.info('Connection to the database failed')
            raise
        result._original_dsn = connection_info
        self._connections.append((result, True))
        self._debug('Create new connection')
        return result

    @locked
    def give_back(self, connection, keep_in_pool=True):
        self._debug('Give back connection to %r', connection.dsn)
        for i, (cnx, used) in enumerate(self._connections):
            if cnx is connection:
                self._connections.pop(i)
                if keep_in_pool:
                    self._connections.append((cnx, False))
                    self._debug('Put connection to %r in pool', cnx.dsn)
                else:
                    self._debug('Forgot connection to %r', cnx.dsn)
                    cnx.close()
                break
        else:
            raise PoolError('This connection does not belong to the pool')

    @locked
    def close_all(self, dsn=None):
        count = 0
        last = None
        for i, (cnx, used) in tools.reverse_enumerate(self._connections):
            if dsn is None or cnx._original_dsn == dsn:
                cnx.close()
                last = self._connections.pop(i)[0]
                count += 1
        _logger.info('%r: Closed %d connections %s', self, count,
                    (dsn and last and 'to %r' % last.dsn) or '')


class Connection(object):
    """ A lightweight instance of a connection to postgres
    """
    def __init__(self, pool, dbname, dsn):
        self.dbname = dbname
        self.dsn = dsn
        self.__pool = pool

    def cursor(self, serialized=True):
        cursor_type = serialized and 'serialized ' or ''
        _logger.debug('create %scursor to %r', cursor_type, self.dsn)
        return Cursor(self.__pool, self.dbname, self.dsn, serialized=serialized)

    # serialized_cursor is deprecated - cursors are serialized by default
    serialized_cursor = cursor

    def __bool__(self):
        raise NotImplementedError()
    __nonzero__ = __bool__

def connection_info_for(db_or_uri):
    """ Parse the given `db_or_uri` and return a 2-tuple (dbname, connection_params).

    Connection params are either a dictionary with a single key ``dsn``
    containing a connection URI, or a dictionary of connection parameter
    keywords from which psycopg2 can build a key/value connection string
    (dsn).

    :param str db_or_uri: database name or postgres dsn
    :rtype: (str, dict)
    """
    if db_or_uri.startswith(('postgresql://', 'postgres://')):
        # extract db from uri
        us = urls.url_parse(db_or_uri)
        if len(us.path) > 1:
            db_name = us.path[1:]
        elif us.username:
            db_name = us.username
        else:
            db_name = us.hostname
        return db_name, {'dsn': db_or_uri}

    connection_info = {'database': db_or_uri}
    for p in ('host', 'port', 'user', 'password', 'sslmode'):
        cfg = tools.config['db_' + p]
        if cfg:
            connection_info[p] = cfg

    return db_or_uri, connection_info
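
# Illustrative examples (values are hypothetical):
#   connection_info_for('mydb')
#   -> ('mydb', {'database': 'mydb'}) plus any db_* keywords set in the config
#   connection_info_for('postgresql://user:pw@host:5432/mydb')
#   -> ('mydb', {'dsn': 'postgresql://user:pw@host:5432/mydb'})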

_Pool = None

def db_connect(to, allow_uri=False):
    global _Pool
    if _Pool is None:
        _Pool = ConnectionPool(int(tools.config['db_maxconn']))

    db, info = connection_info_for(to)
    if not allow_uri and db != to:
        raise ValueError('URI connections not allowed')
    return Connection(_Pool, db, info)
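
# Illustrative usage sketch (database name is hypothetical): a cursor used as
# a context manager commits on success and is closed in any case.
#
#     with db_connect('mydb').cursor() as cr:
#         cr.execute("SELECT 1")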

def close_db(db_name):
    """ You might want to call odoo.modules.registry.Registry.delete(db_name)
        along with this function."""
    global _Pool
    if _Pool:
        _Pool.close_all(connection_info_for(db_name)[1])

def close_all():
    global _Pool
    if _Pool:
        _Pool.close_all()