1# -*- coding: utf-8 -*-
2# Part of Odoo. See LICENSE file for full copyright and licensing details.
3
4
5"""
6    Object Relational Mapping module:
7     * Hierarchical structure
8     * Constraints consistency and validation
9     * Object metadata depends on its status
10     * Optimised processing by complex query (multiple actions at once)
11     * Default field values
12     * Permissions optimisation
13     * Persistent object: DB postgresql
14     * Data conversion
15     * Multi-level caching system
16     * Two different inheritance mechanisms
17     * Rich set of field types:
18          - classical (varchar, integer, boolean, ...)
19          - relational (one2many, many2one, many2many)
20          - functional
21
22"""
23
24import collections
25import contextlib
26import datetime
27import dateutil
28import fnmatch
29import functools
30import itertools
31import io
32import logging
33import operator
34import pytz
35import re
36import uuid
37from collections import defaultdict, OrderedDict
38from collections.abc import MutableMapping
39from contextlib import closing
40from inspect import getmembers, currentframe
41from operator import attrgetter, itemgetter
42
43import babel.dates
44import dateutil.relativedelta
45import psycopg2, psycopg2.extensions
46from lxml import etree
47from lxml.builder import E
48from psycopg2.extensions import AsIs
49
50import odoo
51from . import SUPERUSER_ID
52from . import api
53from . import tools
54from .exceptions import AccessError, MissingError, ValidationError, UserError
55from .osv.query import Query
56from .tools import frozendict, lazy_classproperty, ormcache, \
57                   Collector, LastOrderedSet, OrderedSet, IterableGenerator, \
58                   groupby
59from .tools.config import config
60from .tools.func import frame_codeinfo
61from .tools.misc import CountingStream, clean_context, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT, get_lang
62from .tools.translate import _
63from .tools import date_utils
64from .tools import populate
65from .tools import unique
66from .tools.lru import LRU
67
68_logger = logging.getLogger(__name__)
69_schema = logging.getLogger(__name__ + '.schema')
70_unlink = logging.getLogger(__name__ + '.unlink')
71
72regex_order = re.compile('^(\s*([a-z0-9:_]+|"[a-z0-9:_]+")(\s+(desc|asc))?\s*(,|$))+(?<!,)$', re.I)
73regex_object_name = re.compile(r'^[a-z0-9_.]+$')
74regex_pg_name = re.compile(r'^[a-z_][a-z0-9_$]*$', re.I)
75regex_field_agg = re.compile(r'(\w+)(?::(\w+)(?:\((\w+)\))?)?')
76
77AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
78
79def check_object_name(name):
80    """ Check if the given name is a valid model name.
81
82        The _name attribute in osv and osv_memory object is subject to
83        some restrictions. This function returns True or False whether
84        the given name is allowed or not.
85
86        TODO: this is an approximation. The goal in this approximation
87        is to disallow uppercase characters (in some places, we quote
88        table/column names and in other not, which leads to this kind
89        of errors:
90
91            psycopg2.ProgrammingError: relation "xxx" does not exist).
92
93        The same restriction should apply to both osv and osv_memory
94        objects for consistency.
95
96    """
97    if regex_object_name.match(name) is None:
98        return False
99    return True
100
101def raise_on_invalid_object_name(name):
102    if not check_object_name(name):
103        msg = "The _name attribute %s is not valid." % name
104        raise ValueError(msg)
105
106def check_pg_name(name):
107    """ Check whether the given name is a valid PostgreSQL identifier name. """
108    if not regex_pg_name.match(name):
109        raise ValidationError("Invalid characters in table name %r" % name)
110    if len(name) > 63:
111        raise ValidationError("Table name %r is too long" % name)
112
113# match private methods, to prevent their remote invocation
114regex_private = re.compile(r'^(_.*|init)$')
115
116def check_method_name(name):
117    """ Raise an ``AccessError`` if ``name`` is a private method name. """
118    if regex_private.match(name):
119        raise AccessError(_('Private methods (such as %s) cannot be called remotely.') % (name,))
120
121def same_name(f, g):
122    """ Test whether functions ``f`` and ``g`` are identical or have the same name """
123    return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
124
125def fix_import_export_id_paths(fieldname):
126    """
127    Fixes the id fields in import and exports, and splits field paths
128    on '/'.
129
130    :param str fieldname: name of the field to import/export
131    :return: split field name
132    :rtype: list of str
133    """
134    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
135    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
136    return fixed_external_id.split('/')
137
138def trigger_tree_merge(node1, node2):
139    """ Merge two trigger trees. """
140    for key, val in node2.items():
141        if key is None:
142            node1.setdefault(None, set())
143            node1[None].update(val)
144        else:
145            node1.setdefault(key, {})
146            trigger_tree_merge(node1[key], node2[key])
147
148
149class MetaModel(api.Meta):
150    """ The metaclass of all model classes.
151        Its main purpose is to register the models per module.
152    """
153
154    module_to_models = defaultdict(list)
155
156    def __new__(meta, name, bases, attrs):
157        attrs.setdefault('__slots__', ())
158        return super().__new__(meta, name, bases, attrs)
159
160    def __init__(self, name, bases, attrs):
161        if not self._register:
162            self._register = True
163            super(MetaModel, self).__init__(name, bases, attrs)
164            return
165
166        if not hasattr(self, '_module'):
167            assert self.__module__.startswith('odoo.addons.'), \
168                "Invalid import of %s.%s, it should start with 'odoo.addons'." % (self.__module__, name)
169            self._module = self.__module__.split('.')[2]
170
171        # Remember which models to instanciate for this module.
172        if self._module:
173            self.module_to_models[self._module].append(self)
174
175        for key, val in attrs.items():
176            if isinstance(val, Field):
177                val.args['_module'] = self._module
178
179
180class NewId(object):
181    """ Pseudo-ids for new records, encapsulating an optional origin id (actual
182        record id) and an optional reference (any value).
183    """
184    __slots__ = ['origin', 'ref']
185
186    def __init__(self, origin=None, ref=None):
187        self.origin = origin
188        self.ref = ref
189
190    def __bool__(self):
191        return False
192
193    def __eq__(self, other):
194        return isinstance(other, NewId) and (
195            (self.origin and other.origin and self.origin == other.origin)
196            or (self.ref and other.ref and self.ref == other.ref)
197        )
198
199    def __hash__(self):
200        return hash(self.origin or self.ref or id(self))
201
202    def __repr__(self):
203        return (
204            "<NewId origin=%r>" % self.origin if self.origin else
205            "<NewId ref=%r>" % self.ref if self.ref else
206            "<NewId 0x%x>" % id(self)
207        )
208
209    def __str__(self):
210        if self.origin or self.ref:
211            id_part = repr(self.origin or self.ref)
212        else:
213            id_part = hex(id(self))
214        return "NewId_%s" % id_part
215
216
217def origin_ids(ids):
218    """ Return an iterator over the origin ids corresponding to ``ids``.
219        Actual ids are returned as is, and ids without origin are not returned.
220    """
221    return ((id_ or id_.origin) for id_ in ids if (id_ or getattr(id_, "origin", None)))
222
223
224def expand_ids(id0, ids):
225    """ Return an iterator of unique ids from the concatenation of ``[id0]`` and
226        ``ids``, and of the same kind (all real or all new).
227    """
228    yield id0
229    seen = {id0}
230    kind = bool(id0)
231    for id_ in ids:
232        if id_ not in seen and bool(id_) == kind:
233            yield id_
234            seen.add(id_)
235
236
237IdType = (int, str, NewId)
238
239
240# maximum number of prefetched records
241PREFETCH_MAX = 1000
242
243# special columns automatically created by the ORM
244LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
245MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
246
247# valid SQL aggregation functions
248VALID_AGGREGATE_FUNCTIONS = {
249    'array_agg', 'count', 'count_distinct',
250    'bool_and', 'bool_or', 'max', 'min', 'avg', 'sum',
251}
252
253
254class BaseModel(MetaModel('DummyModel', (object,), {'_register': False})):
255    """Base class for Odoo models.
256
257    Odoo models are created by inheriting one of the following:
258
259    *   :class:`Model` for regular database-persisted models
260
261    *   :class:`TransientModel` for temporary data, stored in the database but
262        automatically vacuumed every so often
263
264    *   :class:`AbstractModel` for abstract super classes meant to be shared by
265        multiple inheriting models
266
267    The system automatically instantiates every model once per database. Those
268    instances represent the available models on each database, and depend on
269    which modules are installed on that database. The actual class of each
270    instance is built from the Python classes that create and inherit from the
271    corresponding model.
272
273    Every model instance is a "recordset", i.e., an ordered collection of
274    records of the model. Recordsets are returned by methods like
275    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
276    explicit representation: a record is represented as a recordset of one
277    record.
278
279    To create a class that should not be instantiated,
280    the :attr:`~odoo.models.BaseModel._register` attribute may be set to False.
281    """
282    __slots__ = ['env', '_ids', '_prefetch_ids']
283
284    _auto = False
285    """Whether a database table should be created.
286    If set to ``False``, override :meth:`~odoo.models.BaseModel.init`
287    to create the database table.
288
289    Automatically defaults to `True` for :class:`Model` and
290    :class:`TransientModel`, `False` for :class:`AbstractModel`.
291
292    .. tip:: To create a model without any table, inherit
293            from :class:`~odoo.models.AbstractModel`.
294    """
295    _register = False           #: registry visibility
296    _abstract = True
297    """ Whether the model is *abstract*.
298
299    .. seealso:: :class:`AbstractModel`
300    """
301    _transient = False
302    """ Whether the model is *transient*.
303
304    .. seealso:: :class:`TransientModel`
305    """
306
307    _name = None                #: the model name (in dot-notation, module namespace)
308    _description = None         #: the model's informal name
309    _custom = False             #: should be True for custom models only
310
311    _inherit = None
312    """Python-inherited models:
313
314    :type: str or list(str)
315
316    .. note::
317
318        * If :attr:`._name` is set, name(s) of parent models to inherit from
319        * If :attr:`._name` is unset, name of a single model to extend in-place
320    """
321    _inherits = {}
322    """dictionary {'parent_model': 'm2o_field'} mapping the _name of the parent business
323    objects to the names of the corresponding foreign key fields to use::
324
325      _inherits = {
326          'a.model': 'a_field_id',
327          'b.model': 'b_field_id'
328      }
329
330    implements composition-based inheritance: the new model exposes all
331    the fields of the inherited models but stores none of them:
332    the values themselves remain stored on the linked record.
333
334    .. warning::
335
336      if multiple fields with the same name are defined in the
337      :attr:`~odoo.models.Model._inherits`-ed models, the inherited field will
338      correspond to the last one (in the inherits list order).
339    """
340    _table = None               #: SQL table name used by model if :attr:`_auto`
341    _table_query = None         #: SQL expression of the table's content (optional)
342    _sequence = None            #: SQL sequence to use for ID field
343    _sql_constraints = []       #: SQL constraints [(name, sql_def, message)]
344
345    _rec_name = None            #: field to use for labeling records, default: ``name``
346    _order = 'id'               #: default order field for searching results
347    _parent_name = 'parent_id'  #: the many2one field used as parent field
348    _parent_store = False
349    """set to True to compute parent_path field.
350
351    Alongside a :attr:`~.parent_path` field, sets up an indexed storage
352    of the tree structure of records, to enable faster hierarchical queries
353    on the records of the current model using the ``child_of`` and
354    ``parent_of`` domain operators.
355    """
356    _active_name = None         #: field to use for active records
357    _date_name = 'date'         #: field to use for default calendar view
358    _fold_name = 'fold'         #: field to determine folded groups in kanban views
359
360    _needaction = False         # whether the model supports "need actions" (Old API)
361    _translate = True           # False disables translations export for this model (Old API)
362    _check_company_auto = False
363    """On write and create, call ``_check_company`` to ensure companies
364    consistency on the relational fields having ``check_company=True``
365    as attribute.
366    """
367
368    _depends = {}
369    """dependencies of models backed up by SQL views
370    ``{model_name: field_names}``, where ``field_names`` is an iterable.
371    This is only used to determine the changes to flush to database before
372    executing ``search()`` or ``read_group()``. It won't be used for cache
373    invalidation or recomputing fields.
374    """
375
376    # default values for _transient_vacuum()
377    _transient_max_count = lazy_classproperty(lambda _: config.get('osv_memory_count_limit'))
378    _transient_max_hours = lazy_classproperty(lambda _: config.get('transient_age_limit'))
379
380    CONCURRENCY_CHECK_FIELD = '__last_update'
381
382    @api.model
383    def view_init(self, fields_list):
384        """ Override this method to do specific things when a form view is
385        opened. This method is invoked by :meth:`~default_get`.
386        """
387        pass
388
389    def _valid_field_parameter(self, field, name):
390        """ Return whether the given parameter name is valid for the field. """
391        return name == 'related_sudo'
392
393    @api.model
394    def _add_field(self, name, field):
395        """ Add the given ``field`` under the given ``name`` in the class """
396        cls = type(self)
397        # add field as an attribute and in cls._fields (for reflection)
398        if not isinstance(getattr(cls, name, field), Field):
399            _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
400        setattr(cls, name, field)
401        cls._fields[name] = field
402
403        # basic setup of field
404        field.setup_base(self, name)
405
406    @api.model
407    def _pop_field(self, name):
408        """ Remove the field with the given ``name`` from the model.
409            This method should only be used for manual fields.
410        """
411        cls = type(self)
412        field = cls._fields.pop(name, None)
413        if hasattr(cls, name):
414            delattr(cls, name)
415        if cls._rec_name == name:
416            # fixup _rec_name and display_name's dependencies
417            cls._rec_name = None
418            cls.display_name.depends = tuple(dep for dep in cls.display_name.depends if dep != name)
419        return field
420
421    @api.model
422    def _add_magic_fields(self):
423        """ Introduce magic fields on the current class
424
425        * id is a "normal" field (with a specific getter)
426        * create_uid, create_date, write_uid and write_date have become
427          "normal" fields
428        * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
429          method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
430          to get the same structure as the previous
431          ``(now() at time zone 'UTC')::timestamp``::
432
433              # select (now() at time zone 'UTC')::timestamp;
434                        timezone
435              ----------------------------
436               2013-06-18 08:30:37.292809
437
438              >>> str(datetime.datetime.utcnow())
439              '2013-06-18 08:31:32.821177'
440        """
441        def add(name, field):
442            """ add ``field`` with the given ``name`` if it does not exist yet """
443            if name not in self._fields:
444                self._add_field(name, field)
445
446        # cyclic import
447        from . import fields
448
449        # this field 'id' must override any other column or field
450        self._add_field('id', fields.Id(automatic=True))
451
452        add('display_name', fields.Char(string='Display Name', automatic=True,
453            compute='_compute_display_name'))
454
455        if self._log_access:
456            add('create_uid', fields.Many2one(
457                'res.users', string='Created by', automatic=True, readonly=True))
458            add('create_date', fields.Datetime(
459                string='Created on', automatic=True, readonly=True))
460            add('write_uid', fields.Many2one(
461                'res.users', string='Last Updated by', automatic=True, readonly=True))
462            add('write_date', fields.Datetime(
463                string='Last Updated on', automatic=True, readonly=True))
464            last_modified_name = 'compute_concurrency_field_with_access'
465        else:
466            last_modified_name = 'compute_concurrency_field'
467
468        # this field must override any other column or field
469        self._add_field(self.CONCURRENCY_CHECK_FIELD, fields.Datetime(
470            string='Last Modified on', compute=last_modified_name,
471            compute_sudo=False, automatic=True))
472
473    def compute_concurrency_field(self):
474        for record in self:
475            record[self.CONCURRENCY_CHECK_FIELD] = odoo.fields.Datetime.now()
476
477    @api.depends('create_date', 'write_date')
478    def compute_concurrency_field_with_access(self):
479        for record in self:
480            record[self.CONCURRENCY_CHECK_FIELD] = \
481                record.write_date or record.create_date or odoo.fields.Datetime.now()
482
483    #
484    # Goal: try to apply inheritance at the instantiation level and
485    #       put objects in the pool var
486    #
487    @classmethod
488    def _build_model(cls, pool, cr):
489        """ Instantiate a given model in the registry.
490
491        This method creates or extends a "registry" class for the given model.
492        This "registry" class carries inferred model metadata, and inherits (in
493        the Python sense) from all classes that define the model, and possibly
494        other registry classes.
495
496        """
497
498        # In the simplest case, the model's registry class inherits from cls and
499        # the other classes that define the model in a flat hierarchy. The
500        # registry contains the instance ``model`` (on the left). Its class,
501        # ``ModelClass``, carries inferred metadata that is shared between all
502        # the model's instances for this registry only.
503        #
504        #   class A1(Model):                          Model
505        #       _name = 'a'                           / | \
506        #                                            A3 A2 A1
507        #   class A2(Model):                          \ | /
508        #       _inherit = 'a'                      ModelClass
509        #                                             /   \
510        #   class A3(Model):                      model   recordset
511        #       _inherit = 'a'
512        #
513        # When a model is extended by '_inherit', its base classes are modified
514        # to include the current class and the other inherited model classes.
515        # Note that we actually inherit from other ``ModelClass``, so that
516        # extensions to an inherited model are immediately visible in the
517        # current model class, like in the following example:
518        #
519        #   class A1(Model):
520        #       _name = 'a'                           Model
521        #                                            / / \ \
522        #   class B1(Model):                        / A2 A1 \
523        #       _name = 'b'                        /   \ /   \
524        #                                         B2  ModelA  B1
525        #   class B2(Model):                       \    |    /
526        #       _name = 'b'                         \   |   /
527        #       _inherit = ['a', 'b']                \  |  /
528        #                                             ModelB
529        #   class A2(Model):
530        #       _inherit = 'a'
531
532        if getattr(cls, '_constraints', None):
533            _logger.warning("Model attribute '_constraints' is no longer supported, "
534                            "please use @api.constrains on methods instead.")
535
536        # Keep links to non-inherited constraints in cls; this is useful for
537        # instance when exporting translations
538        cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
539
540        # determine inherited models
541        parents = cls._inherit
542        parents = [parents] if isinstance(parents, str) else (parents or [])
543
544        # determine the model's name
545        name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
546
547        # all models except 'base' implicitly inherit from 'base'
548        if name != 'base':
549            parents = list(parents) + ['base']
550
551        # create or retrieve the model's class
552        if name in parents:
553            if name not in pool:
554                raise TypeError("Model %r does not exist in registry." % name)
555            ModelClass = pool[name]
556            ModelClass._build_model_check_base(cls)
557            check_parent = ModelClass._build_model_check_parent
558        else:
559            ModelClass = type(name, (BaseModel,), {
560                '_name': name,
561                '_register': False,
562                '_original_module': cls._module,
563                '_inherit_module': dict(),              # map parent to introducing module
564                '_inherit_children': OrderedSet(),      # names of children models
565                '_inherits_children': set(),            # names of children models
566                '_fields': OrderedDict(),               # populated in _setup_base()
567            })
568            check_parent = cls._build_model_check_parent
569
570        # determine all the classes the model should inherit from
571        bases = LastOrderedSet([cls])
572        for parent in parents:
573            if parent not in pool:
574                raise TypeError("Model %r inherits from non-existing model %r." % (name, parent))
575            parent_class = pool[parent]
576            if parent == name:
577                for base in parent_class.__bases__:
578                    bases.add(base)
579            else:
580                check_parent(cls, parent_class)
581                bases.add(parent_class)
582                ModelClass._inherit_module[parent] = cls._module
583                parent_class._inherit_children.add(name)
584
585        ModelClass.__bases__ = tuple(bases)
586
587        # determine the attributes of the model's class
588        ModelClass._build_model_attributes(pool)
589
590        check_pg_name(ModelClass._table)
591
592        # Transience
593        if ModelClass._transient:
594            assert ModelClass._log_access, \
595                "TransientModels must have log_access turned on, " \
596                "in order to implement their vacuum policy"
597
598        # link the class to the registry, and update the registry
599        ModelClass.pool = pool
600        pool[name] = ModelClass
601
602        # backward compatibility: instantiate the model, and initialize it
603        model = object.__new__(ModelClass)
604        model.__init__(pool, cr)
605
606        return ModelClass
607
608    @classmethod
609    def _build_model_check_base(model_class, cls):
610        """ Check whether ``model_class`` can be extended with ``cls``. """
611        if model_class._abstract and not cls._abstract:
612            msg = ("%s transforms the abstract model %r into a non-abstract model. "
613                   "That class should either inherit from AbstractModel, or set a different '_name'.")
614            raise TypeError(msg % (cls, model_class._name))
615        if model_class._transient != cls._transient:
616            if model_class._transient:
617                msg = ("%s transforms the transient model %r into a non-transient model. "
618                       "That class should either inherit from TransientModel, or set a different '_name'.")
619            else:
620                msg = ("%s transforms the model %r into a transient model. "
621                       "That class should either inherit from Model, or set a different '_name'.")
622            raise TypeError(msg % (cls, model_class._name))
623
624    @classmethod
625    def _build_model_check_parent(model_class, cls, parent_class):
626        """ Check whether ``model_class`` can inherit from ``parent_class``. """
627        if model_class._abstract and not parent_class._abstract:
628            msg = ("In %s, the abstract model %r cannot inherit from the non-abstract model %r.")
629            raise TypeError(msg % (cls, model_class._name, parent_class._name))
630
631    @classmethod
632    def _build_model_attributes(cls, pool):
633        """ Initialize base model attributes. """
634        cls._description = cls._name
635        cls._table = cls._name.replace('.', '_')
636        cls._sequence = None
637        cls._log_access = cls._auto
638        cls._inherits = {}
639        cls._depends = {}
640        cls._sql_constraints = {}
641
642        for base in reversed(cls.__bases__):
643            if not getattr(base, 'pool', None):
644                # the following attributes are not taken from model classes
645                parents = [base._inherit] if base._inherit and isinstance(base._inherit, str) else (base._inherit or [])
646                if cls._name not in parents and not base._description:
647                    _logger.warning("The model %s has no _description", cls._name)
648                cls._description = base._description or cls._description
649                cls._table = base._table or cls._table
650                cls._sequence = base._sequence or cls._sequence
651                cls._log_access = getattr(base, '_log_access', cls._log_access)
652
653            cls._inherits.update(base._inherits)
654
655            for mname, fnames in base._depends.items():
656                cls._depends.setdefault(mname, []).extend(fnames)
657
658            for cons in base._sql_constraints:
659                cls._sql_constraints[cons[0]] = cons
660
661        cls._sequence = cls._sequence or (cls._table + '_id_seq')
662        cls._sql_constraints = list(cls._sql_constraints.values())
663
664        # update _inherits_children of parent models
665        for parent_name in cls._inherits:
666            pool[parent_name]._inherits_children.add(cls._name)
667
668        # recompute attributes of _inherit_children models
669        for child_name in cls._inherit_children:
670            child_class = pool[child_name]
671            child_class._build_model_attributes(pool)
672
673    @classmethod
674    def _init_constraints_onchanges(cls):
675        # store list of sql constraint qualified names
676        for (key, _, _) in cls._sql_constraints:
677            cls.pool._sql_constraints.add(cls._table + '_' + key)
678
679        # reset properties memoized on cls
680        cls._constraint_methods = BaseModel._constraint_methods
681        cls._onchange_methods = BaseModel._onchange_methods
682
683    @property
684    def _constraint_methods(self):
685        """ Return a list of methods implementing Python constraints. """
686        def is_constraint(func):
687            return callable(func) and hasattr(func, '_constrains')
688
689        cls = type(self)
690        methods = []
691        for attr, func in getmembers(cls, is_constraint):
692            for name in func._constrains:
693                field = cls._fields.get(name)
694                if not field:
695                    _logger.warning("method %s.%s: @constrains parameter %r is not a field name", cls._name, attr, name)
696                elif not (field.store or field.inverse or field.inherited):
697                    _logger.warning("method %s.%s: @constrains parameter %r is not writeable", cls._name, attr, name)
698            methods.append(func)
699
700        # optimization: memoize result on cls, it will not be recomputed
701        cls._constraint_methods = methods
702        return methods
703
704    @property
705    def _onchange_methods(self):
706        """ Return a dictionary mapping field names to onchange methods. """
707        def is_onchange(func):
708            return callable(func) and hasattr(func, '_onchange')
709
710        # collect onchange methods on the model's class
711        cls = type(self)
712        methods = defaultdict(list)
713        for attr, func in getmembers(cls, is_onchange):
714            missing = []
715            for name in func._onchange:
716                if name not in cls._fields:
717                    missing.append(name)
718                methods[name].append(func)
719            if missing:
720                _logger.warning(
721                    "@api.onchange%r parameters must be field names -> not valid: %s",
722                    func._onchange, missing
723                )
724
725        # add onchange methods to implement "change_default" on fields
726        def onchange_default(field, self):
727            value = field.convert_to_write(self[field.name], self)
728            condition = "%s=%s" % (field.name, value)
729            defaults = self.env['ir.default'].get_model_defaults(self._name, condition)
730            self.update(defaults)
731
732        for name, field in cls._fields.items():
733            if field.change_default:
734                methods[name].append(functools.partial(onchange_default, field))
735
736        # optimization: memoize result on cls, it will not be recomputed
737        cls._onchange_methods = methods
738        return methods
739
740    def __new__(cls):
741        # In the past, this method was registering the model class in the server.
742        # This job is now done entirely by the metaclass MetaModel.
743        return None
744
745    def __init__(self, pool, cr):
746        """ Deprecated method to initialize the model. """
747        pass
748
749    def _is_an_ordinary_table(self):
750        return self.pool.is_an_ordinary_table(self)
751
752    def __ensure_xml_id(self, skip=False):
753        """ Create missing external ids for records in ``self``, and return an
754            iterator of pairs ``(record, xmlid)`` for the records in ``self``.
755
756        :rtype: Iterable[Model, str | None]
757        """
758        if skip:
759            return ((record, None) for record in self)
760
761        if not self:
762            return iter([])
763
764        if not self._is_an_ordinary_table():
765            raise Exception(
766                "You can not export the column ID of model %s, because the "
767                "table %s is not an ordinary table."
768                % (self._name, self._table))
769
770        modname = '__export__'
771
772        cr = self.env.cr
773        cr.execute("""
774            SELECT res_id, module, name
775            FROM ir_model_data
776            WHERE model = %s AND res_id in %s
777        """, (self._name, tuple(self.ids)))
778        xids = {
779            res_id: (module, name)
780            for res_id, module, name in cr.fetchall()
781        }
782        def to_xid(record_id):
783            (module, name) = xids[record_id]
784            return ('%s.%s' % (module, name)) if module else name
785
786        # create missing xml ids
787        missing = self.filtered(lambda r: r.id not in xids)
788        if not missing:
789            return (
790                (record, to_xid(record.id))
791                for record in self
792            )
793
794        xids.update(
795            (r.id, (modname, '%s_%s_%s' % (
796                r._table,
797                r.id,
798                uuid.uuid4().hex[:8],
799            )))
800            for r in missing
801        )
802        fields = ['module', 'model', 'name', 'res_id']
803
804        # disable eventual async callback / support for the extent of
805        # the COPY FROM, as these are apparently incompatible
806        callback = psycopg2.extensions.get_wait_callback()
807        psycopg2.extensions.set_wait_callback(None)
808        try:
809            cr.copy_from(io.StringIO(
810                u'\n'.join(
811                    u"%s\t%s\t%s\t%d" % (
812                        modname,
813                        record._name,
814                        xids[record.id][1],
815                        record.id,
816                    )
817                    for record in missing
818                )),
819                table='ir_model_data',
820                columns=fields,
821            )
822        finally:
823            psycopg2.extensions.set_wait_callback(callback)
824        self.env['ir.model.data'].invalidate_cache(fnames=fields)
825
826        return (
827            (record, to_xid(record.id))
828            for record in self
829        )
830
831    def _export_rows(self, fields, *, _is_toplevel_call=True):
832        """ Export fields of the records in ``self``.
833
834            :param fields: list of lists of fields to traverse
835            :param bool _is_toplevel_call:
836                used when recursing, avoid using when calling from outside
837            :return: list of lists of corresponding values
838        """
839        import_compatible = self.env.context.get('import_compat', True)
840        lines = []
841
842        def splittor(rs):
843            """ Splits the self recordset in batches of 1000 (to avoid
844            entire-recordset-prefetch-effects) & removes the previous batch
845            from the cache after it's been iterated in full
846            """
847            for idx in range(0, len(rs), 1000):
848                sub = rs[idx:idx+1000]
849                for rec in sub:
850                    yield rec
851                rs.invalidate_cache(ids=sub.ids)
852        if not _is_toplevel_call:
853            splittor = lambda rs: rs
854
855        # memory stable but ends up prefetching 275 fields (???)
856        for record in splittor(self):
857            # main line of record, initially empty
858            current = [''] * len(fields)
859            lines.append(current)
860
861            # list of primary fields followed by secondary field(s)
862            primary_done = []
863
864            # process column by column
865            for i, path in enumerate(fields):
866                if not path:
867                    continue
868
869                name = path[0]
870                if name in primary_done:
871                    continue
872
873                if name == '.id':
874                    current[i] = str(record.id)
875                elif name == 'id':
876                    current[i] = (record._name, record.id)
877                else:
878                    field = record._fields[name]
879                    value = record[name]
880
881                    # this part could be simpler, but it has to be done this way
882                    # in order to reproduce the former behavior
883                    if not isinstance(value, BaseModel):
884                        current[i] = field.convert_to_export(value, record)
885                    else:
886                        primary_done.append(name)
887                        # recursively export the fields that follow name; use
888                        # 'display_name' where no subfield is exported
889                        fields2 = [(p[1:] or ['display_name'] if p and p[0] == name else [])
890                                   for p in fields]
891
892                        # in import_compat mode, m2m should always be exported as
893                        # a comma-separated list of xids or names in a single cell
894                        if import_compatible and field.type == 'many2many':
895                            index = None
896                            # find out which subfield the user wants & its
897                            # location as we might not get it as the first
898                            # column we encounter
899                            for name in ['id', 'name', 'display_name']:
900                                with contextlib.suppress(ValueError):
901                                    index = fields2.index([name])
902                                    break
903                            if index is None:
904                                # not found anything, assume we just want the
905                                # name_get in the first column
906                                name = None
907                                index = i
908
909                            if name == 'id':
910                                xml_ids = [xid for _, xid in value.__ensure_xml_id()]
911                                current[index] = ','.join(xml_ids) or False
912                            else:
913                                current[index] = field.convert_to_export(value, record) or False
914                            continue
915
916                        lines2 = value._export_rows(fields2, _is_toplevel_call=False)
917                        if lines2:
918                            # merge first line with record's main line
919                            for j, val in enumerate(lines2[0]):
920                                if val or isinstance(val, (int, float)):
921                                    current[j] = val
922                            # append the other lines at the end
923                            lines += lines2[1:]
924                        else:
925                            current[i] = False
926
927        # if any xid should be exported, only do so at toplevel
928        if _is_toplevel_call and any(f[-1] == 'id' for f in fields):
929            bymodels = collections.defaultdict(set)
930            xidmap = collections.defaultdict(list)
931            # collect all the tuples in "lines" (along with their coordinates)
932            for i, line in enumerate(lines):
933                for j, cell in enumerate(line):
934                    if type(cell) is tuple:
935                        bymodels[cell[0]].add(cell[1])
936                        xidmap[cell].append((i, j))
937            # for each model, xid-export everything and inject in matrix
938            for model, ids in bymodels.items():
939                for record, xid in self.env[model].browse(ids).__ensure_xml_id():
940                    for i, j in xidmap.pop((record._name, record.id)):
941                        lines[i][j] = xid
942            assert not xidmap, "failed to export xids for %s" % ', '.join('{}:{}' % it for it in xidmap.items())
943
944        return lines
945
946    # backward compatibility
947    __export_rows = _export_rows
948
949    def export_data(self, fields_to_export):
950        """ Export fields for selected objects
951
952            :param fields_to_export: list of fields
953            :param raw_data: True to return value in native Python type
954            :rtype: dictionary with a *datas* matrix
955
956            This method is used when exporting data via client menu
957        """
958        if not (self.env.is_admin() or self.env.user.has_group('base.group_allow_export')):
959            raise UserError(_("You don't have the rights to export data. Please contact an Administrator."))
960        fields_to_export = [fix_import_export_id_paths(f) for f in fields_to_export]
961        return {'datas': self._export_rows(fields_to_export)}
962
963    @api.model
964    def load(self, fields, data):
965        """
966        Attempts to load the data matrix, and returns a list of ids (or
967        ``False`` if there was an error and no id could be generated) and a
968        list of messages.
969
970        The ids are those of the records created and saved (in database), in
971        the same order they were extracted from the file. They can be passed
972        directly to :meth:`~read`
973
974        :param fields: list of fields to import, at the same index as the corresponding data
975        :type fields: list(str)
976        :param data: row-major matrix of data to import
977        :type data: list(list(str))
978        :returns: {ids: list(int)|False, messages: [Message][, lastrow: int]}
979        """
980        self.flush()
981
982        # determine values of mode, current_module and noupdate
983        mode = self._context.get('mode', 'init')
984        current_module = self._context.get('module', '__import__')
985        noupdate = self._context.get('noupdate', False)
986        # add current module in context for the conversion of xml ids
987        self = self.with_context(_import_current_module=current_module)
988
989        cr = self._cr
990        cr.execute('SAVEPOINT model_load')
991
992        fields = [fix_import_export_id_paths(f) for f in fields]
993        fg = self.fields_get()
994
995        ids = []
996        messages = []
997        ModelData = self.env['ir.model.data']
998
999        # list of (xid, vals, info) for records to be created in batch
1000        batch = []
1001        batch_xml_ids = set()
1002        # models in which we may have created / modified data, therefore might
1003        # require flushing in order to name_search: the root model and any
1004        # o2m
1005        creatable_models = {self._name}
1006        for field_path in fields:
1007            if field_path[0] in (None, 'id', '.id'):
1008                continue
1009            model_fields = self._fields
1010            if isinstance(model_fields[field_path[0]], odoo.fields.Many2one):
1011                # this only applies for toplevel m2o (?) fields
1012                if field_path[0] in (self.env.context.get('name_create_enabled_fieds') or {}):
1013                    creatable_models.add(model_fields[field_path[0]].comodel_name)
1014            for field_name in field_path:
1015                if field_name in (None, 'id', '.id'):
1016                    break
1017
1018                if isinstance(model_fields[field_name], odoo.fields.One2many):
1019                    comodel = model_fields[field_name].comodel_name
1020                    creatable_models.add(comodel)
1021                    model_fields = self.env[comodel]._fields
1022
1023        def flush(*, xml_id=None, model=None):
1024            if not batch:
1025                return
1026
1027            assert not (xml_id and model), \
1028                "flush can specify *either* an external id or a model, not both"
1029
1030            if xml_id and xml_id not in batch_xml_ids:
1031                if xml_id not in self.env:
1032                    return
1033            if model and model not in creatable_models:
1034                return
1035
1036            data_list = [
1037                dict(xml_id=xid, values=vals, info=info, noupdate=noupdate)
1038                for xid, vals, info in batch
1039            ]
1040            batch.clear()
1041            batch_xml_ids.clear()
1042
1043            # try to create in batch
1044            try:
1045                with cr.savepoint():
1046                    recs = self._load_records(data_list, mode == 'update')
1047                    ids.extend(recs.ids)
1048                return
1049            except psycopg2.InternalError as e:
1050                # broken transaction, exit and hope the source error was already logged
1051                if not any(message['type'] == 'error' for message in messages):
1052                    info = data_list[0]['info']
1053                    messages.append(dict(info, type='error', message=_(u"Unknown database error: '%s'", e)))
1054                return
1055            except Exception:
1056                pass
1057
1058            errors = 0
1059            # try again, this time record by record
1060            for i, rec_data in enumerate(data_list, 1):
1061                try:
1062                    with cr.savepoint():
1063                        rec = self._load_records([rec_data], mode == 'update')
1064                        ids.append(rec.id)
1065                except psycopg2.Warning as e:
1066                    info = rec_data['info']
1067                    messages.append(dict(info, type='warning', message=str(e)))
1068                except psycopg2.Error as e:
1069                    info = rec_data['info']
1070                    messages.append(dict(info, type='error', **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
1071                    # Failed to write, log to messages, rollback savepoint (to
1072                    # avoid broken transaction) and keep going
1073                    errors += 1
1074                except Exception as e:
1075                    _logger.debug("Error while loading record", exc_info=True)
1076                    info = rec_data['info']
1077                    message = (_(u'Unknown error during import:') + u' %s: %s' % (type(e), e))
1078                    moreinfo = _('Resolve other errors first')
1079                    messages.append(dict(info, type='error', message=message, moreinfo=moreinfo))
1080                    # Failed for some reason, perhaps due to invalid data supplied,
1081                    # rollback savepoint and keep going
1082                    errors += 1
1083                if errors >= 10 and (errors >= i / 10):
1084                    messages.append({
1085                        'type': 'warning',
1086                        'message': _(u"Found more than 10 errors and more than one error per 10 records, interrupted to avoid showing too many errors.")
1087                    })
1088                    break
1089
1090        # make 'flush' available to the methods below, in the case where XMLID
1091        # resolution fails, for instance
1092        flush_self = self.with_context(import_flush=flush, import_cache=LRU(1024))
1093
1094        # TODO: break load's API instead of smuggling via context?
1095        limit = self._context.get('_import_limit')
1096        if limit is None:
1097            limit = float('inf')
1098        extracted = flush_self._extract_records(fields, data, log=messages.append, limit=limit)
1099
1100        converted = flush_self._convert_records(extracted, log=messages.append)
1101
1102        info = {'rows': {'to': -1}}
1103        for id, xid, record, info in converted:
1104            if xid:
1105                xid = xid if '.' in xid else "%s.%s" % (current_module, xid)
1106                batch_xml_ids.add(xid)
1107            elif id:
1108                record['id'] = id
1109            batch.append((xid, record, info))
1110
1111        flush()
1112        if any(message['type'] == 'error' for message in messages):
1113            cr.execute('ROLLBACK TO SAVEPOINT model_load')
1114            ids = False
1115            # cancel all changes done to the registry/ormcache
1116            self.pool.reset_changes()
1117
1118        nextrow = info['rows']['to'] + 1
1119        if nextrow < limit:
1120            nextrow = 0
1121        return {
1122            'ids': ids,
1123            'messages': messages,
1124            'nextrow': nextrow,
1125        }
1126
1127    def _add_fake_fields(self, fields):
1128        from odoo.fields import Char, Integer
1129        fields[None] = Char('rec_name')
1130        fields['id'] = Char('External ID')
1131        fields['.id'] = Integer('Database ID')
1132        return fields
1133
1134    def _extract_records(self, fields_, data, log=lambda a: None, limit=float('inf')):
1135        """ Generates record dicts from the data sequence.
1136
1137        The result is a generator of dicts mapping field names to raw
1138        (unconverted, unvalidated) values.
1139
1140        For relational fields, if sub-fields were provided the value will be
1141        a list of sub-records
1142
1143        The following sub-fields may be set on the record (by key):
1144        * None is the name_get for the record (to use with name_create/name_search)
1145        * "id" is the External ID for the record
1146        * ".id" is the Database ID for the record
1147        """
1148        fields = dict(self._fields)
1149        # Fake fields to avoid special cases in extractor
1150        fields = self._add_fake_fields(fields)
1151        # m2o fields can't be on multiple lines so exclude them from the
1152        # is_relational field rows filter, but special-case it later on to
1153        # be handled with relational fields (as it can have subfields)
1154        is_relational = lambda field: fields[field].relational
1155        get_o2m_values = itemgetter_tuple([
1156            index
1157            for index, fnames in enumerate(fields_)
1158            if fields[fnames[0]].type == 'one2many'
1159        ])
1160        get_nono2m_values = itemgetter_tuple([
1161            index
1162            for index, fnames in enumerate(fields_)
1163            if fields[fnames[0]].type != 'one2many'
1164        ])
1165        # Checks if the provided row has any non-empty one2many fields
1166        def only_o2m_values(row):
1167            return any(get_o2m_values(row)) and not any(get_nono2m_values(row))
1168
1169        index = 0
1170        while index < len(data) and index < limit:
1171            row = data[index]
1172
1173            # copy non-relational fields to record dict
1174            record = {fnames[0]: value
1175                      for fnames, value in zip(fields_, row)
1176                      if not is_relational(fnames[0])}
1177
1178            # Get all following rows which have relational values attached to
1179            # the current record (no non-relational values)
1180            record_span = itertools.takewhile(
1181                only_o2m_values, itertools.islice(data, index + 1, None))
1182            # stitch record row back on for relational fields
1183            record_span = list(itertools.chain([row], record_span))
1184            for relfield in set(fnames[0] for fnames in fields_ if is_relational(fnames[0])):
1185                comodel = self.env[fields[relfield].comodel_name]
1186
1187                # get only cells for this sub-field, should be strictly
1188                # non-empty, field path [None] is for name_get field
1189                indices, subfields = zip(*((index, fnames[1:] or [None])
1190                                           for index, fnames in enumerate(fields_)
1191                                           if fnames[0] == relfield))
1192
1193                # return all rows which have at least one value for the
1194                # subfields of relfield
1195                relfield_data = [it for it in map(itemgetter_tuple(indices), record_span) if any(it)]
1196                record[relfield] = [
1197                    subrecord
1198                    for subrecord, _subinfo in comodel._extract_records(subfields, relfield_data, log=log)
1199                ]
1200
1201            yield record, {'rows': {
1202                'from': index,
1203                'to': index + len(record_span) - 1,
1204            }}
1205            index += len(record_span)
1206
1207    @api.model
1208    def _convert_records(self, records, log=lambda a: None):
1209        """ Converts records from the source iterable (recursive dicts of
1210        strings) into forms which can be written to the database (via
1211        self.create or (ir.model.data)._update)
1212
1213        :returns: a list of triplets of (id, xid, record)
1214        :rtype: list((int|None, str|None, dict))
1215        """
1216        field_names = {name: field.string for name, field in self._fields.items()}
1217        if self.env.lang:
1218            field_names.update(self.env['ir.translation'].get_field_string(self._name))
1219
1220        convert = self.env['ir.fields.converter'].for_model(self)
1221
1222        def _log(base, record, field, exception):
1223            type = 'warning' if isinstance(exception, Warning) else 'error'
1224            # logs the logical (not human-readable) field name for automated
1225            # processing of response, but injects human readable in message
1226            exc_vals = dict(base, record=record, field=field_names[field])
1227            record = dict(base, type=type, record=record, field=field,
1228                          message=str(exception.args[0]) % exc_vals)
1229            if len(exception.args) > 1 and isinstance(exception.args[1], dict):
1230                record.update(exception.args[1])
1231            log(record)
1232
1233        stream = CountingStream(records)
1234        for record, extras in stream:
1235            # xid
1236            xid = record.get('id', False)
1237            # dbid
1238            dbid = False
1239            if '.id' in record:
1240                try:
1241                    dbid = int(record['.id'])
1242                except ValueError:
1243                    # in case of overridden id column
1244                    dbid = record['.id']
1245                if not self.search([('id', '=', dbid)]):
1246                    log(dict(extras,
1247                        type='error',
1248                        record=stream.index,
1249                        field='.id',
1250                        message=_(u"Unknown database identifier '%s'", dbid)))
1251                    dbid = False
1252
1253            converted = convert(record, functools.partial(_log, extras, stream.index))
1254
1255            yield dbid, xid, converted, dict(extras, record=stream.index)
1256
1257    def _validate_fields(self, field_names, excluded_names=()):
1258        """ Invoke the constraint methods for which at least one field name is
1259        in ``field_names`` and none is in ``excluded_names``.
1260        """
1261        field_names = set(field_names)
1262        excluded_names = set(excluded_names)
1263        for check in self._constraint_methods:
1264            if (not field_names.isdisjoint(check._constrains)
1265                    and excluded_names.isdisjoint(check._constrains)):
1266                check(self)
1267
1268    @api.model
1269    def default_get(self, fields_list):
1270        """ default_get(fields_list) -> default_values
1271
1272        Return default values for the fields in ``fields_list``. Default
1273        values are determined by the context, user defaults, and the model
1274        itself.
1275
1276        :param list fields_list: names of field whose default is requested
1277        :return: a dictionary mapping field names to their corresponding default values,
1278            if they have a default value.
1279        :rtype: dict
1280
1281        .. note::
1282
1283            Unrequested defaults won't be considered, there is no need to return a
1284            value for fields whose names are not in `fields_list`.
1285        """
1286        # trigger view init hook
1287        self.view_init(fields_list)
1288
1289        defaults = {}
1290        parent_fields = defaultdict(list)
1291        ir_defaults = self.env['ir.default'].get_model_defaults(self._name)
1292
1293        for name in fields_list:
1294            # 1. look up context
1295            key = 'default_' + name
1296            if key in self._context:
1297                defaults[name] = self._context[key]
1298                continue
1299
1300            # 2. look up ir.default
1301            if name in ir_defaults:
1302                defaults[name] = ir_defaults[name]
1303                continue
1304
1305            field = self._fields.get(name)
1306
1307            # 3. look up field.default
1308            if field and field.default:
1309                defaults[name] = field.default(self)
1310                continue
1311
1312            # 4. delegate to parent model
1313            if field and field.inherited:
1314                field = field.related_field
1315                parent_fields[field.model_name].append(field.name)
1316
1317        # convert default values to the right format
1318        #
1319        # we explicitly avoid using _convert_to_write() for x2many fields,
1320        # because the latter leaves values like [(4, 2), (4, 3)], which are not
1321        # supported by the web client as default values; stepping through the
1322        # cache allows to normalize such a list to [(6, 0, [2, 3])], which is
1323        # properly supported by the web client
1324        for fname, value in defaults.items():
1325            if fname in self._fields:
1326                field = self._fields[fname]
1327                value = field.convert_to_cache(value, self, validate=False)
1328                defaults[fname] = field.convert_to_write(value, self)
1329
1330        # add default values for inherited fields
1331        for model, names in parent_fields.items():
1332            defaults.update(self.env[model].default_get(names))
1333
1334        return defaults
1335
1336    @api.model
1337    def fields_get_keys(self):
1338        return list(self._fields)
1339
1340    @api.model
1341    def _rec_name_fallback(self):
1342        # if self._rec_name is set, it belongs to self._fields
1343        return self._rec_name or 'id'
1344
1345    #
1346    # Override this method if you need a window title that depends on the context
1347    #
1348    @api.model
1349    def view_header_get(self, view_id=None, view_type='form'):
1350        return False
1351
1352    @api.model
1353    def user_has_groups(self, groups):
1354        """Return true if the user is member of at least one of the groups in
1355        ``groups``, and is not a member of any of the groups in ``groups``
1356        preceded by ``!``. Typically used to resolve ``groups`` attribute in
1357        view and model definitions.
1358
1359        :param str groups: comma-separated list of fully-qualified group
1360            external IDs, e.g., ``base.group_user,base.group_system``,
1361            optionally preceded by ``!``
1362        :return: True if the current user is a member of one of the given groups
1363            not preceded by ``!`` and is not member of any of the groups
1364            preceded by ``!``
1365        """
1366        from odoo.http import request
1367        user = self.env.user
1368
1369        has_groups = []
1370        not_has_groups = []
1371        for group_ext_id in groups.split(','):
1372            group_ext_id = group_ext_id.strip()
1373            if group_ext_id[0] == '!':
1374                not_has_groups.append(group_ext_id[1:])
1375            else:
1376                has_groups.append(group_ext_id)
1377
1378        for group_ext_id in not_has_groups:
1379            if group_ext_id == 'base.group_no_one':
1380                # check: the group_no_one is effective in debug mode only
1381                if user.has_group(group_ext_id) and request and request.session.debug:
1382                    return False
1383            else:
1384                if user.has_group(group_ext_id):
1385                    return False
1386
1387        for group_ext_id in has_groups:
1388            if group_ext_id == 'base.group_no_one':
1389                # check: the group_no_one is effective in debug mode only
1390                if user.has_group(group_ext_id) and request and request.session.debug:
1391                    return True
1392            else:
1393                if user.has_group(group_ext_id):
1394                    return True
1395
1396        return not has_groups
1397
1398    @api.model
1399    def _get_default_form_view(self):
1400        """ Generates a default single-line form view using all fields
1401        of the current model.
1402
1403        :returns: a form view as an lxml document
1404        :rtype: etree._Element
1405        """
1406        group = E.group(col="4")
1407        for fname, field in self._fields.items():
1408            if field.automatic:
1409                continue
1410            elif field.type in ('one2many', 'many2many', 'text', 'html'):
1411                group.append(E.newline())
1412                group.append(E.field(name=fname, colspan="4"))
1413                group.append(E.newline())
1414            else:
1415                group.append(E.field(name=fname))
1416        group.append(E.separator())
1417        return E.form(E.sheet(group, string=self._description))
1418
1419    @api.model
1420    def _get_default_search_view(self):
1421        """ Generates a single-field search view, based on _rec_name.
1422
1423        :returns: a tree view as an lxml document
1424        :rtype: etree._Element
1425        """
1426        element = E.field(name=self._rec_name_fallback())
1427        return E.search(element, string=self._description)
1428
1429    @api.model
1430    def _get_default_tree_view(self):
1431        """ Generates a single-field tree view, based on _rec_name.
1432
1433        :returns: a tree view as an lxml document
1434        :rtype: etree._Element
1435        """
1436        element = E.field(name=self._rec_name_fallback())
1437        return E.tree(element, string=self._description)
1438
1439    @api.model
1440    def _get_default_pivot_view(self):
1441        """ Generates an empty pivot view.
1442
1443        :returns: a pivot view as an lxml document
1444        :rtype: etree._Element
1445        """
1446        return E.pivot(string=self._description)
1447
1448    @api.model
1449    def _get_default_kanban_view(self):
1450        """ Generates a single-field kanban view, based on _rec_name.
1451
1452        :returns: a kanban view as an lxml document
1453        :rtype: etree._Element
1454        """
1455
1456        field = E.field(name=self._rec_name_fallback())
1457        content_div = E.div(field, {'class': "o_kanban_card_content"})
1458        card_div = E.div(content_div, {'t-attf-class': "oe_kanban_card oe_kanban_global_click"})
1459        kanban_box = E.t(card_div, {'t-name': "kanban-box"})
1460        templates = E.templates(kanban_box)
1461        return E.kanban(templates, string=self._description)
1462
1463    @api.model
1464    def _get_default_graph_view(self):
1465        """ Generates a single-field graph view, based on _rec_name.
1466
1467        :returns: a graph view as an lxml document
1468        :rtype: etree._Element
1469        """
1470        element = E.field(name=self._rec_name_fallback())
1471        return E.graph(element, string=self._description)
1472
1473    @api.model
1474    def _get_default_calendar_view(self):
1475        """ Generates a default calendar view by trying to infer
1476        calendar fields from a number of pre-set attribute names
1477
1478        :returns: a calendar view
1479        :rtype: etree._Element
1480        """
1481        def set_first_of(seq, in_, to):
1482            """Sets the first value of ``seq`` also found in ``in_`` to
1483            the ``to`` attribute of the ``view`` being closed over.
1484
1485            Returns whether it's found a suitable value (and set it on
1486            the attribute) or not
1487            """
1488            for item in seq:
1489                if item in in_:
1490                    view.set(to, item)
1491                    return True
1492            return False
1493
1494        view = E.calendar(string=self._description)
1495        view.append(E.field(name=self._rec_name_fallback()))
1496
1497        if not set_first_of([self._date_name, 'date', 'date_start', 'x_date', 'x_date_start'],
1498                            self._fields, 'date_start'):
1499            raise UserError(_("Insufficient fields for Calendar View!"))
1500
1501        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
1502                     self._fields, 'color')
1503
1504        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
1505                            self._fields, 'date_stop'):
1506            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
1507                                self._fields, 'date_delay'):
1508                raise UserError(_("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay", self._name))
1509
1510        return view
1511
1512    @api.model
1513    def load_views(self, views, options=None):
1514        """ Returns the fields_views of given views, along with the fields of
1515            the current model, and optionally its filters for the given action.
1516
1517        :param views: list of [view_id, view_type]
1518        :param options['toolbar']: True to include contextual actions when loading fields_views
1519        :param options['load_filters']: True to return the model's filters
1520        :param options['action_id']: id of the action to get the filters
1521        :return: dictionary with fields_views, fields and optionally filters
1522        """
1523        options = options or {}
1524        result = {}
1525
1526        toolbar = options.get('toolbar')
1527        result['fields_views'] = {
1528            v_type: self.fields_view_get(v_id, v_type if v_type != 'list' else 'tree',
1529                                         toolbar=toolbar if v_type != 'search' else False)
1530            for [v_id, v_type] in views
1531        }
1532        result['fields'] = self.fields_get()
1533
1534        if options.get('load_filters'):
1535            result['filters'] = self.env['ir.filters'].get_filters(self._name, options.get('action_id'))
1536
1537
1538        return result
1539
1540    @api.model
1541    def _fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
1542        View = self.env['ir.ui.view'].sudo()
1543        result = {
1544            'model': self._name,
1545            'field_parent': False,
1546        }
1547
1548        # try to find a view_id if none provided
1549        if not view_id:
1550            # <view_type>_view_ref in context can be used to overrride the default view
1551            view_ref_key = view_type + '_view_ref'
1552            view_ref = self._context.get(view_ref_key)
1553            if view_ref:
1554                if '.' in view_ref:
1555                    module, view_ref = view_ref.split('.', 1)
1556                    query = "SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s"
1557                    self._cr.execute(query, (module, view_ref))
1558                    view_ref_res = self._cr.fetchone()
1559                    if view_ref_res:
1560                        view_id = view_ref_res[0]
1561                else:
1562                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1563                        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
1564                        self._name)
1565
1566            if not view_id:
1567                # otherwise try to find the lowest priority matching ir.ui.view
1568                view_id = View.default_view(self._name, view_type)
1569
1570        if view_id:
1571            # read the view with inherited views applied
1572            root_view = View.browse(view_id).read_combined(['id', 'name', 'field_parent', 'type', 'model', 'arch'])
1573            result['arch'] = root_view['arch']
1574            result['name'] = root_view['name']
1575            result['type'] = root_view['type']
1576            result['view_id'] = root_view['id']
1577            result['field_parent'] = root_view['field_parent']
1578            result['base_model'] = root_view['model']
1579        else:
1580            # fallback on default views methods if no ir.ui.view could be found
1581            try:
1582                arch_etree = getattr(self, '_get_default_%s_view' % view_type)()
1583                result['arch'] = etree.tostring(arch_etree, encoding='unicode')
1584                result['type'] = view_type
1585                result['name'] = 'default'
1586            except AttributeError:
1587                raise UserError(_("No default view of type '%s' could be found !", view_type))
1588        return result
1589
1590    @api.model
1591    def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
1592        """ fields_view_get([view_id | view_type='form'])
1593
1594        Get the detailed composition of the requested view like fields, model, view architecture
1595
1596        :param int view_id: id of the view or None
1597        :param str view_type: type of the view to return if view_id is None ('form', 'tree', ...)
1598        :param bool toolbar: true to include contextual actions
1599        :param submenu: deprecated
1600        :return: composition of the requested view (including inherited views and extensions)
1601        :rtype: dict
1602        :raise AttributeError:
1603                * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1604                * if some tag other than 'position' is found in parent view
1605        :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
1606        """
1607        self.check_access_rights('read')
1608        view = self.env['ir.ui.view'].sudo().browse(view_id)
1609
1610        # Get the view arch and all other attributes describing the composition of the view
1611        result = self._fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
1612
1613        # Override context for postprocessing
1614        if view_id and result.get('base_model', self._name) != self._name:
1615            view = view.with_context(base_model_name=result['base_model'])
1616
1617        # Apply post processing, groups and modifiers etc...
1618        xarch, xfields = view.postprocess_and_fields(etree.fromstring(result['arch']), model=self._name)
1619        result['arch'] = xarch
1620        result['fields'] = xfields
1621
1622        # Add related action information if aksed
1623        if toolbar:
1624            vt = 'list' if view_type == 'tree' else view_type
1625            bindings = self.env['ir.actions.actions'].get_bindings(self._name)
1626            resreport = [action
1627                         for action in bindings['report']
1628                         if vt in (action.get('binding_view_types') or vt).split(',')]
1629            resaction = [action
1630                         for action in bindings['action']
1631                         if vt in (action.get('binding_view_types') or vt).split(',')]
1632
1633            result['toolbar'] = {
1634                'print': resreport,
1635                'action': resaction,
1636            }
1637        return result
1638
1639    def get_formview_id(self, access_uid=None):
1640        """ Return an view id to open the document ``self`` with. This method is
1641            meant to be overridden in addons that want to give specific view ids
1642            for example.
1643
1644            Optional access_uid holds the user that would access the form view
1645            id different from the current environment user.
1646        """
1647        return False
1648
1649    def get_formview_action(self, access_uid=None):
1650        """ Return an action to open the document ``self``. This method is meant
1651            to be overridden in addons that want to give specific view ids for
1652            example.
1653
1654        An optional access_uid holds the user that will access the document
1655        that could be different from the current user. """
1656        view_id = self.sudo().get_formview_id(access_uid=access_uid)
1657        return {
1658            'type': 'ir.actions.act_window',
1659            'res_model': self._name,
1660            'view_type': 'form',
1661            'view_mode': 'form',
1662            'views': [(view_id, 'form')],
1663            'target': 'current',
1664            'res_id': self.id,
1665            'context': dict(self._context),
1666        }
1667
1668    def get_access_action(self, access_uid=None):
1669        """ Return an action to open the document. This method is meant to be
1670        overridden in addons that want to give specific access to the document.
1671        By default it opens the formview of the document.
1672
1673        An optional access_uid holds the user that will access the document
1674        that could be different from the current user.
1675        """
1676        return self[0].get_formview_action(access_uid=access_uid)
1677
1678    @api.model
1679    def search_count(self, args):
1680        """ search_count(args) -> int
1681
1682        Returns the number of records in the current model matching :ref:`the
1683        provided domain <reference/orm/domains>`.
1684        """
1685        res = self.search(args, count=True)
1686        return res if isinstance(res, int) else len(res)
1687
1688    @api.model
1689    @api.returns('self',
1690        upgrade=lambda self, value, args, offset=0, limit=None, order=None, count=False: value if count else self.browse(value),
1691        downgrade=lambda self, value, args, offset=0, limit=None, order=None, count=False: value if count else value.ids)
1692    def search(self, args, offset=0, limit=None, order=None, count=False):
1693        """ search(args[, offset=0][, limit=None][, order=None][, count=False])
1694
1695        Searches for records based on the ``args``
1696        :ref:`search domain <reference/orm/domains>`.
1697
1698        :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
1699                     list to match all records.
1700        :param int offset: number of results to ignore (default: none)
1701        :param int limit: maximum number of records to return (default: all)
1702        :param str order: sort string
1703        :param bool count: if True, only counts and returns the number of matching records (default: False)
1704        :returns: at most ``limit`` records matching the search criteria
1705
1706        :raise AccessError: * if user tries to bypass access rules for read on the requested object.
1707        """
1708        res = self._search(args, offset=offset, limit=limit, order=order, count=count)
1709        return res if count else self.browse(res)
1710
1711    #
1712    # display_name, name_get, name_create, name_search
1713    #
1714
1715    @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1716    def _compute_display_name(self):
1717        """Compute the value of the `display_name` field.
1718
1719        In general `display_name` is equal to calling `name_get()[0][1]`.
1720
1721        In that case, it is recommended to use `display_name` to uniformize the
1722        code and to potentially take advantage of prefetch when applicable.
1723
1724        However some models might override this method. For them, the behavior
1725        might differ, and it is important to select which of `display_name` or
1726        `name_get()[0][1]` to call depending on the desired result.
1727        """
1728        names = dict(self.name_get())
1729        for record in self:
1730            record.display_name = names.get(record.id, False)
1731
1732    def name_get(self):
1733        """ name_get() -> [(id, name), ...]
1734
1735        Returns a textual representation for the records in ``self``.
1736        By default this is the value of the ``display_name`` field.
1737
1738        :return: list of pairs ``(id, text_repr)`` for each records
1739        :rtype: list(tuple)
1740        """
1741        result = []
1742        name = self._rec_name
1743        if name in self._fields:
1744            convert = self._fields[name].convert_to_display_name
1745            for record in self:
1746                result.append((record.id, convert(record[name], record)))
1747        else:
1748            for record in self:
1749                result.append((record.id, "%s,%s" % (record._name, record.id)))
1750
1751        return result
1752
1753    @api.model
1754    def name_create(self, name):
1755        """ name_create(name) -> record
1756
1757        Create a new record by calling :meth:`~.create` with only one value
1758        provided: the display name of the new record.
1759
1760        The new record will be initialized with any default values
1761        applicable to this model, or provided through the context. The usual
1762        behavior of :meth:`~.create` applies.
1763
1764        :param name: display name of the record to create
1765        :rtype: tuple
1766        :return: the :meth:`~.name_get` pair value of the created record
1767        """
1768        if self._rec_name:
1769            record = self.create({self._rec_name: name})
1770            return record.name_get()[0]
1771        else:
1772            _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
1773            return False
1774
1775    @api.model
1776    def name_search(self, name='', args=None, operator='ilike', limit=100):
1777        """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1778
1779        Search for records that have a display name matching the given
1780        ``name`` pattern when compared with the given ``operator``, while also
1781        matching the optional search domain (``args``).
1782
1783        This is used for example to provide suggestions based on a partial
1784        value for a relational field. Sometimes be seen as the inverse
1785        function of :meth:`~.name_get`, but it is not guaranteed to be.
1786
1787        This method is equivalent to calling :meth:`~.search` with a search
1788        domain based on ``display_name`` and then :meth:`~.name_get` on the
1789        result of the search.
1790
1791        :param str name: the name pattern to match
1792        :param list args: optional search domain (see :meth:`~.search` for
1793                          syntax), specifying further restrictions
1794        :param str operator: domain operator for matching ``name``, such as
1795                             ``'like'`` or ``'='``.
1796        :param int limit: optional max number of records to return
1797        :rtype: list
1798        :return: list of pairs ``(id, text_repr)`` for all matching records.
1799        """
1800        ids = self._name_search(name, args, operator, limit=limit)
1801        return self.browse(ids).sudo().name_get()
1802
1803    @api.model
1804    def _name_search(self, name='', args=None, operator='ilike', limit=100, name_get_uid=None):
1805        """ _name_search(name='', args=None, operator='ilike', limit=100, name_get_uid=None) -> ids
1806
1807        Private implementation of name_search, allows passing a dedicated user
1808        for the name_get part to solve some access rights issues.
1809        """
1810        args = list(args or [])
1811        # optimize out the default criterion of ``ilike ''`` that matches everything
1812        if not self._rec_name:
1813            _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1814        elif not (name == '' and operator == 'ilike'):
1815            args += [(self._rec_name, operator, name)]
1816        return self._search(args, limit=limit, access_rights_uid=name_get_uid)
1817
1818    @api.model
1819    def _add_missing_default_values(self, values):
1820        # avoid overriding inherited values when parent is set
1821        avoid_models = set()
1822
1823        def collect_models_to_avoid(model):
1824            for parent_mname, parent_fname in model._inherits.items():
1825                if parent_fname in values:
1826                    avoid_models.add(parent_mname)
1827                else:
1828                    # manage the case where an ancestor parent field is set
1829                    collect_models_to_avoid(self.env[parent_mname])
1830
1831        collect_models_to_avoid(self)
1832
1833        def avoid(field):
1834            # check whether the field is inherited from one of avoid_models
1835            if avoid_models:
1836                while field.inherited:
1837                    field = field.related_field
1838                    if field.model_name in avoid_models:
1839                        return True
1840            return False
1841
1842        # compute missing fields
1843        missing_defaults = {
1844            name
1845            for name, field in self._fields.items()
1846            if name not in values
1847            if not avoid(field)
1848        }
1849
1850        if not missing_defaults:
1851            return values
1852
1853        # override defaults with the provided values, never allow the other way around
1854        defaults = self.default_get(list(missing_defaults))
1855        for name, value in defaults.items():
1856            if self._fields[name].type == 'many2many' and value and isinstance(value[0], int):
1857                # convert a list of ids into a list of commands
1858                defaults[name] = [(6, 0, value)]
1859            elif self._fields[name].type == 'one2many' and value and isinstance(value[0], dict):
1860                # convert a list of dicts into a list of commands
1861                defaults[name] = [(0, 0, x) for x in value]
1862        defaults.update(values)
1863        return defaults
1864
1865    @classmethod
1866    def clear_caches(cls):
1867        """ Clear the caches
1868
1869        This clears the caches associated to methods decorated with
1870        ``tools.ormcache`` or ``tools.ormcache_multi``.
1871        """
1872        cls.pool._clear_cache()
1873
1874    @api.model
1875    def _read_group_expand_full(self, groups, domain, order):
1876        """Extend the group to include all targer records by default."""
1877        return groups.search([], order=order)
1878
1879    @api.model
1880    def _read_group_fill_results(self, domain, groupby, remaining_groupbys,
1881                                 aggregated_fields, count_field,
1882                                 read_group_result, read_group_order=None):
1883        """Helper method for filling in empty groups for all possible values of
1884           the field being grouped by"""
1885        field = self._fields[groupby]
1886        if not field.group_expand:
1887            return read_group_result
1888
1889        # field.group_expand is the name of a method that returns the groups
1890        # that we want to display for this field, in the form of a recordset or
1891        # a list of values (depending on the type of the field). This is useful
1892        # to implement kanban views for instance, where some columns should be
1893        # displayed even if they don't contain any record.
1894
1895        # determine all groups that should be returned
1896        values = [line[groupby] for line in read_group_result if line[groupby]]
1897
1898        if field.relational:
1899            # groups is a recordset; determine order on groups's model
1900            groups = self.env[field.comodel_name].browse([value[0] for value in values])
1901            order = groups._order
1902            if read_group_order == groupby + ' desc':
1903                order = tools.reverse_order(order)
1904            groups = getattr(self, field.group_expand)(groups, domain, order)
1905            groups = groups.sudo()
1906            values = lazy_name_get(groups)
1907            value2key = lambda value: value and value[0]
1908
1909        else:
1910            # groups is a list of values
1911            values = getattr(self, field.group_expand)(values, domain, None)
1912            if read_group_order == groupby + ' desc':
1913                values.reverse()
1914            value2key = lambda value: value
1915
1916        # Merge the current results (list of dicts) with all groups. Determine
1917        # the global order of results groups, which is supposed to be in the
1918        # same order as read_group_result (in the case of a many2one field).
1919        result = OrderedDict((value2key(value), {}) for value in values)
1920
1921        # fill in results from read_group_result
1922        for line in read_group_result:
1923            key = value2key(line[groupby])
1924            if not result.get(key):
1925                result[key] = line
1926            else:
1927                result[key][count_field] = line[count_field]
1928
1929        # fill in missing results from all groups
1930        for value in values:
1931            key = value2key(value)
1932            if not result[key]:
1933                line = dict.fromkeys(aggregated_fields, False)
1934                line[groupby] = value
1935                line[groupby + '_count'] = 0
1936                line['__domain'] = [(groupby, '=', key)] + domain
1937                if remaining_groupbys:
1938                    line['__context'] = {'group_by': remaining_groupbys}
1939                result[key] = line
1940
1941        # add folding information if present
1942        if field.relational and groups._fold_name in groups._fields:
1943            fold = {group.id: group[groups._fold_name]
1944                    for group in groups.browse([key for key in result if key])}
1945            for key, line in result.items():
1946                line['__fold'] = fold.get(key, False)
1947
1948        return list(result.values())
1949
1950    @api.model
1951    def _read_group_fill_temporal(self, data, groupby, aggregated_fields, annotated_groupbys,
1952                                  interval=dateutil.relativedelta.relativedelta(months=1)):
1953        """Helper method for filling date/datetime 'holes' in a result set.
1954
1955        We are in a use case where data are grouped by a date field (typically
1956        months but it could be any other interval) and displayed in a chart.
1957
1958        Assume we group records by month, and we only have data for August,
1959        September and December. By default, plotting the result gives something
1960        like:
1961                                                ___
1962                                      ___      |   |
1963                                     |   |     |   |
1964                                     |   | ___ |   |
1965                                     |   ||   ||   |
1966                                     |___||___||___|
1967                                      Aug  Sep  Dec
1968
1969        The problem is that December data follows immediately September data,
1970        which is misleading for the user. Adding explicit zeroes for missing data
1971        gives something like:
1972                                                     ___
1973                                 ___                |   |
1974                                |   |               |   |
1975                                |   | ___           |   |
1976                                |   ||   |          |   |
1977                                |___||___| ___  ___ |___|
1978                                 Aug  Sep  Oct  Nov  Dec
1979
1980        :param list data: the data containing groups
1981        :param list groupby: name of the first group by
1982        :param list aggregated_fields: list of aggregated fields in the query
1983        :param relativedelta interval: interval between two temporal groups
1984                expressed as a relativedelta month by default
1985        :rtype: list
1986        :return: list
1987        """
1988        first_a_gby = annotated_groupbys[0]
1989        if not data:
1990            return data
1991        if first_a_gby['type'] not in ('date', 'datetime'):
1992            return data
1993        interval = first_a_gby['interval']
1994        groupby_name = groupby[0]
1995
1996        # existing non null datetimes
1997        existing = [d[groupby_name] for d in data if d[groupby_name]]
1998
1999        if len(existing) < 2:
2000            return data
2001
2002        # assumption: existing data is sorted by field 'groupby_name'
2003        first, last = existing[0], existing[-1]
2004
2005        empty_item = {'id': False, (groupby_name.split(':')[0] + '_count'): 0}
2006        empty_item.update({key: False for key in aggregated_fields})
2007        empty_item.update({key: False for key in [group['groupby'] for group in annotated_groupbys[1:]]})
2008
2009        grouped_data = collections.defaultdict(list)
2010        for d in data:
2011            grouped_data[d[groupby_name]].append(d)
2012
2013        result = []
2014
2015        for dt in date_utils.date_range(first, last, interval):
2016            result.extend(grouped_data[dt] or [dict(empty_item, **{groupby_name: dt})])
2017
2018        if False in grouped_data:
2019            result.extend(grouped_data[False])
2020
2021        return result
2022
2023    @api.model
2024    def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
2025        """
2026        Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
2027        to the query if order should be computed against m2o field.
2028        :param orderby: the orderby definition in the form "%(field)s %(order)s"
2029        :param aggregated_fields: list of aggregated fields in the query
2030        :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
2031                These dictionaries contains the qualified name of each groupby
2032                (fully qualified SQL name for the corresponding field),
2033                and the (non raw) field name.
2034        :param osv.Query query: the query under construction
2035        :return: (groupby_terms, orderby_terms)
2036        """
2037        orderby_terms = []
2038        groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
2039        if not orderby:
2040            return groupby_terms, orderby_terms
2041
2042        self._check_qorder(orderby)
2043
2044        # when a field is grouped as 'foo:bar', both orderby='foo' and
2045        # orderby='foo:bar' generate the clause 'ORDER BY "foo:bar"'
2046        groupby_fields = {
2047            gb[key]: gb['groupby']
2048            for gb in annotated_groupbys
2049            for key in ('field', 'groupby')
2050        }
2051        for order_part in orderby.split(','):
2052            order_split = order_part.split()
2053            order_field = order_split[0]
2054            if order_field == 'id' or order_field in groupby_fields:
2055                if self._fields[order_field.split(':')[0]].type == 'many2one':
2056                    order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
2057                    if order_clause:
2058                        orderby_terms.append(order_clause)
2059                        groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
2060                else:
2061                    order_split[0] = '"%s"' % groupby_fields.get(order_field, order_field)
2062                    orderby_terms.append(' '.join(order_split))
2063            elif order_field in aggregated_fields:
2064                order_split[0] = '"%s"' % order_field
2065                orderby_terms.append(' '.join(order_split))
2066            elif order_field not in self._fields:
2067                raise ValueError("Invalid field %r on model %r" % (order_field, self._name))
2068            else:
2069                # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
2070                _logger.warning('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
2071                             self._name, order_part)
2072
2073        return groupby_terms, orderby_terms
2074
2075    @api.model
2076    def _read_group_process_groupby(self, gb, query):
2077        """
2078            Helper method to collect important information about groupbys: raw
2079            field name, type, time information, qualified name, ...
2080        """
2081        split = gb.split(':')
2082        field = self._fields.get(split[0])
2083        if not field:
2084            raise ValueError("Invalid field %r on model %r" % (split[0], self._name))
2085        field_type = field.type
2086        gb_function = split[1] if len(split) == 2 else None
2087        temporal = field_type in ('date', 'datetime')
2088        tz_convert = field_type == 'datetime' and self._context.get('tz') in pytz.all_timezones
2089        qualified_field = self._inherits_join_calc(self._table, split[0], query)
2090        if temporal:
2091            display_formats = {
2092                # Careful with week/year formats:
2093                #  - yyyy (lower) must always be used, *except* for week+year formats
2094                #  - YYYY (upper) must always be used for week+year format
2095                #         e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
2096                #                         and W1 2006 for others
2097                #
2098                # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
2099                # such as 2006-01-01 being formatted as "January 2005" in some locales.
2100                # Cfr: http://babel.pocoo.org/en/latest/dates.html#date-fields
2101                'hour': 'hh:00 dd MMM',
2102                'day': 'dd MMM yyyy', # yyyy = normal year
2103                'week': "'W'w YYYY",  # w YYYY = ISO week-year
2104                'month': 'MMMM yyyy',
2105                'quarter': 'QQQ yyyy',
2106                'year': 'yyyy',
2107            }
2108            time_intervals = {
2109                'hour': dateutil.relativedelta.relativedelta(hours=1),
2110                'day': dateutil.relativedelta.relativedelta(days=1),
2111                'week': datetime.timedelta(days=7),
2112                'month': dateutil.relativedelta.relativedelta(months=1),
2113                'quarter': dateutil.relativedelta.relativedelta(months=3),
2114                'year': dateutil.relativedelta.relativedelta(years=1)
2115            }
2116            if tz_convert:
2117                qualified_field = "timezone('%s', timezone('UTC',%s))" % (self._context.get('tz', 'UTC'), qualified_field)
2118            qualified_field = "date_trunc('%s', %s::timestamp)" % (gb_function or 'month', qualified_field)
2119        if field_type == 'boolean':
2120            qualified_field = "coalesce(%s,false)" % qualified_field
2121        return {
2122            'field': split[0],
2123            'groupby': gb,
2124            'type': field_type,
2125            'display_format': display_formats[gb_function or 'month'] if temporal else None,
2126            'interval': time_intervals[gb_function or 'month'] if temporal else None,
2127            'tz_convert': tz_convert,
2128            'qualified_field': qualified_field,
2129        }
2130
2131    @api.model
2132    def _read_group_prepare_data(self, key, value, groupby_dict):
2133        """
2134            Helper method to sanitize the data received by read_group. The None
2135            values are converted to False, and the date/datetime are formatted,
2136            and corrected according to the timezones.
2137        """
2138        value = False if value is None else value
2139        gb = groupby_dict.get(key)
2140        if gb and gb['type'] in ('date', 'datetime') and value:
2141            if isinstance(value, str):
2142                dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2143                value = datetime.datetime.strptime(value, dt_format)
2144            if gb['tz_convert']:
2145                value = pytz.timezone(self._context['tz']).localize(value)
2146        return value
2147
2148    @api.model
2149    def _read_group_format_result(self, data, annotated_groupbys, groupby, domain):
2150        """
2151            Helper method to format the data contained in the dictionary data by
2152            adding the domain corresponding to its values, the groupbys in the
2153            context and by properly formatting the date/datetime values.
2154
2155        :param data: a single group
2156        :param annotated_groupbys: expanded grouping metainformation
2157        :param groupby: original grouping metainformation
2158        :param domain: original domain for read_group
2159        """
2160
2161        sections = []
2162        for gb in annotated_groupbys:
2163            ftype = gb['type']
2164            value = data[gb['groupby']]
2165
2166            # full domain for this groupby spec
2167            d = None
2168            if value:
2169                if ftype == 'many2one':
2170                    value = value[0]
2171                elif ftype in ('date', 'datetime'):
2172                    locale = get_lang(self.env).code
2173                    fmt = DEFAULT_SERVER_DATETIME_FORMAT if ftype == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2174                    tzinfo = None
2175                    range_start = value
2176                    range_end = value + gb['interval']
2177                    # value from postgres is in local tz (so range is
2178                    # considered in local tz e.g. "day" is [00:00, 00:00[
2179                    # local rather than UTC which could be [11:00, 11:00]
2180                    # local) but domain and raw value should be in UTC
2181                    if gb['tz_convert']:
2182                        tzinfo = range_start.tzinfo
2183                        range_start = range_start.astimezone(pytz.utc)
2184                        # take into account possible hour change between start and end
2185                        range_end = tzinfo.localize(range_end.replace(tzinfo=None))
2186                        range_end = range_end.astimezone(pytz.utc)
2187
2188                    range_start = range_start.strftime(fmt)
2189                    range_end = range_end.strftime(fmt)
2190                    if ftype == 'datetime':
2191                        label = babel.dates.format_datetime(
2192                            value, format=gb['display_format'],
2193                            tzinfo=tzinfo, locale=locale
2194                        )
2195                    else:
2196                        label = babel.dates.format_date(
2197                            value, format=gb['display_format'],
2198                            locale=locale
2199                        )
2200                    data[gb['groupby']] = ('%s/%s' % (range_start, range_end), label)
2201                    d = [
2202                        '&',
2203                        (gb['field'], '>=', range_start),
2204                        (gb['field'], '<', range_end),
2205                    ]
2206
2207            if d is None:
2208                d = [(gb['field'], '=', value)]
2209            sections.append(d)
2210        sections.append(domain)
2211
2212        data['__domain'] = expression.AND(sections)
2213        if len(groupby) - len(annotated_groupbys) >= 1:
2214            data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2215        del data['id']
2216        return data
2217
2218    @api.model
2219    def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
2220        """Get the list of records in list view grouped by the given ``groupby`` fields.
2221
2222        :param list domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
2223                     list to match all records.
2224        :param list fields: list of fields present in the list view specified on the object.
2225                Each element is either 'field' (field name, using the default aggregation),
2226                or 'field:agg' (aggregate field with aggregation function 'agg'),
2227                or 'name:agg(field)' (aggregate field with 'agg' and return it as 'name').
2228                The possible aggregation functions are the ones provided by PostgreSQL
2229                (https://www.postgresql.org/docs/current/static/functions-aggregate.html)
2230                and 'count_distinct', with the expected meaning.
2231        :param list groupby: list of groupby descriptions by which the records will be grouped.
2232                A groupby description is either a field (then it will be grouped by that field)
2233                or a string 'field:groupby_function'.  Right now, the only functions supported
2234                are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2235                date/datetime fields.
2236        :param int offset: optional number of records to skip
2237        :param int limit: optional max number of records to return
2238        :param str orderby: optional ``order by`` specification, for
2239                             overriding the natural sort ordering of the
2240                             groups, see also :py:meth:`~osv.osv.osv.search`
2241                             (supported only for many2one fields currently)
2242        :param bool lazy: if true, the results are only grouped by the first groupby and the
2243                remaining groupbys are put in the __context key.  If false, all the groupbys are
2244                done in one call.
2245        :return: list of dictionaries(one dictionary for each record) containing:
2246
2247                    * the values of fields grouped by the fields in ``groupby`` argument
2248                    * __domain: list of tuples specifying the search criteria
2249                    * __context: dictionary with argument like ``groupby``
2250        :rtype: [{'field_name_1': value, ...]
2251        :raise AccessError: * if user has no read rights on the requested object
2252                            * if user tries to bypass access rules for read on the requested object
2253        """
2254        result = self._read_group_raw(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
2255
2256        groupby = [groupby] if isinstance(groupby, str) else list(OrderedSet(groupby))
2257        dt = [
2258            f for f in groupby
2259            if self._fields[f.split(':')[0]].type in ('date', 'datetime')    # e.g. 'date:month'
2260        ]
2261
2262        # iterate on all results and replace the "full" date/datetime value
2263        # (range, label) by just the formatted label, in-place
2264        for group in result:
2265            for df in dt:
2266                # could group on a date(time) field which is empty in some
2267                # records, in which case as with m2o the _raw value will be
2268                # `False` instead of a (value, label) pair. In that case,
2269                # leave the `False` value alone
2270                if group.get(df):
2271                    group[df] = group[df][1]
2272        return result
2273
2274    @api.model
2275    def _read_group_raw(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
2276        self.check_access_rights('read')
2277        query = self._where_calc(domain)
2278        fields = fields or [f.name for f in self._fields.values() if f.store]
2279
2280        groupby = [groupby] if isinstance(groupby, str) else list(OrderedSet(groupby))
2281        groupby_list = groupby[:1] if lazy else groupby
2282        annotated_groupbys = [self._read_group_process_groupby(gb, query) for gb in groupby_list]
2283        groupby_fields = [g['field'] for g in annotated_groupbys]
2284        order = orderby or ','.join([g for g in groupby_list])
2285        groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2286
2287        self._apply_ir_rules(query, 'read')
2288        for gb in groupby_fields:
2289            if gb not in self._fields:
2290                raise UserError(_("Unknown field %r in 'groupby'") % gb)
2291            gb_field = self._fields[gb].base_field
2292            if not (gb_field.store and gb_field.column_type):
2293                raise UserError(_("Fields in 'groupby' must be database-persisted fields (no computed fields)"))
2294
2295        aggregated_fields = []
2296        select_terms = []
2297        fnames = []                     # list of fields to flush
2298
2299        for fspec in fields:
2300            if fspec == 'sequence':
2301                continue
2302            if fspec == '__count':
2303                # the web client sometimes adds this pseudo-field in the list
2304                continue
2305
2306            match = regex_field_agg.match(fspec)
2307            if not match:
2308                raise UserError(_("Invalid field specification %r.", fspec))
2309
2310            name, func, fname = match.groups()
2311            if func:
2312                # we have either 'name:func' or 'name:func(fname)'
2313                fname = fname or name
2314                field = self._fields.get(fname)
2315                if not field:
2316                    raise ValueError("Invalid field %r on model %r" % (fname, self._name))
2317                if not (field.base_field.store and field.base_field.column_type):
2318                    raise UserError(_("Cannot aggregate field %r.", fname))
2319                if func not in VALID_AGGREGATE_FUNCTIONS:
2320                    raise UserError(_("Invalid aggregation function %r.", func))
2321            else:
2322                # we have 'name', retrieve the aggregator on the field
2323                field = self._fields.get(name)
2324                if not field:
2325                    raise ValueError("Invalid field %r on model %r" % (name, self._name))
2326                if not (field.base_field.store and
2327                        field.base_field.column_type and field.group_operator):
2328                    continue
2329                func, fname = field.group_operator, name
2330
2331            fnames.append(fname)
2332
2333            if fname in groupby_fields:
2334                continue
2335            if name in aggregated_fields:
2336                raise UserError(_("Output name %r is used twice.", name))
2337            aggregated_fields.append(name)
2338
2339            expr = self._inherits_join_calc(self._table, fname, query)
2340            if func.lower() == 'count_distinct':
2341                term = 'COUNT(DISTINCT %s) AS "%s"' % (expr, name)
2342            else:
2343                term = '%s(%s) AS "%s"' % (func, expr, name)
2344            select_terms.append(term)
2345
2346        for gb in annotated_groupbys:
2347            select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2348
2349        self._flush_search(domain, fields=fnames + groupby_fields)
2350
2351        groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2352        from_clause, where_clause, where_clause_params = query.get_sql()
2353        if lazy and (len(groupby_fields) >= 2 or not self._context.get('group_by_no_leaf')):
2354            count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2355        else:
2356            count_field = '_'
2357        count_field += '_count'
2358
2359        prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2360        prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2361
2362        query = """
2363            SELECT min("%(table)s".id) AS id, count("%(table)s".id) AS "%(count_field)s" %(extra_fields)s
2364            FROM %(from)s
2365            %(where)s
2366            %(groupby)s
2367            %(orderby)s
2368            %(limit)s
2369            %(offset)s
2370        """ % {
2371            'table': self._table,
2372            'count_field': count_field,
2373            'extra_fields': prefix_terms(',', select_terms),
2374            'from': from_clause,
2375            'where': prefix_term('WHERE', where_clause),
2376            'groupby': prefix_terms('GROUP BY', groupby_terms),
2377            'orderby': prefix_terms('ORDER BY', orderby_terms),
2378            'limit': prefix_term('LIMIT', int(limit) if limit else None),
2379            'offset': prefix_term('OFFSET', int(offset) if limit else None),
2380        }
2381        self._cr.execute(query, where_clause_params)
2382        fetched_data = self._cr.dictfetchall()
2383
2384        if not groupby_fields:
2385            return fetched_data
2386
2387        self._read_group_resolve_many2one_fields(fetched_data, annotated_groupbys)
2388
2389        data = [{k: self._read_group_prepare_data(k, v, groupby_dict) for k, v in r.items()} for r in fetched_data]
2390
2391        if self.env.context.get('fill_temporal') and data:
2392            data = self._read_group_fill_temporal(data, groupby, aggregated_fields,
2393                                                  annotated_groupbys)
2394
2395        result = [self._read_group_format_result(d, annotated_groupbys, groupby, domain) for d in data]
2396
2397        if lazy:
2398            # Right now, read_group only fill results in lazy mode (by default).
2399            # If you need to have the empty groups in 'eager' mode, then the
2400            # method _read_group_fill_results need to be completely reimplemented
2401            # in a sane way
2402            result = self._read_group_fill_results(
2403                domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2404                aggregated_fields, count_field, result, read_group_order=order,
2405            )
2406        return result
2407
2408    def _read_group_resolve_many2one_fields(self, data, fields):
2409        many2onefields = {field['field'] for field in fields if field['type'] == 'many2one'}
2410        for field in many2onefields:
2411            ids_set = {d[field] for d in data if d[field]}
2412            m2o_records = self.env[self._fields[field].comodel_name].browse(ids_set)
2413            data_dict = dict(lazy_name_get(m2o_records.sudo()))
2414            for d in data:
2415                d[field] = (d[field], data_dict[d[field]]) if d[field] else False
2416
2417    def _inherits_join_add(self, current_model, parent_model_name, query):
2418        """
2419        Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2420        :param current_model: current model object
2421        :param parent_model_name: name of the parent model for which the clauses should be added
2422        :param query: query object on which the JOIN should be added
2423        """
2424        inherits_field = current_model._inherits[parent_model_name]
2425        parent_model = self.env[parent_model_name]
2426        parent_alias = query.left_join(
2427            current_model._table, inherits_field, parent_model._table, 'id', inherits_field,
2428        )
2429        return parent_alias
2430
2431    @api.model
2432    def _inherits_join_calc(self, alias, fname, query):
2433        """
2434        Adds missing table select and join clause(s) to ``query`` for reaching
2435        the field coming from an '_inherits' parent table (no duplicates).
2436
2437        :param alias: name of the initial SQL alias
2438        :param fname: name of inherited field to reach
2439        :param query: query object on which the JOIN should be added
2440        :return: qualified name of field, to be used in SELECT clause
2441        """
2442        # INVARIANT: alias is the SQL alias of model._table in query
2443        model, field = self, self._fields[fname]
2444        while field.inherited:
2445            # retrieve the parent model where field is inherited from
2446            parent_model = self.env[field.related_field.model_name]
2447            parent_fname = field.related[0]
2448            # JOIN parent_model._table AS parent_alias ON alias.parent_fname = parent_alias.id
2449            parent_alias = query.left_join(
2450                alias, parent_fname, parent_model._table, 'id', parent_fname,
2451            )
2452            model, alias, field = parent_model, parent_alias, field.related_field
2453        # handle the case where the field is translated
2454        if field.translate is True:
2455            return model._generate_translated_field(alias, fname, query)
2456        else:
2457            return '"%s"."%s"' % (alias, fname)
2458
2459    def _parent_store_compute(self):
2460        """ Compute parent_path field from scratch. """
2461        if not self._parent_store:
2462            return
2463
2464        # Each record is associated to a string 'parent_path', that represents
2465        # the path from the record's root node to the record. The path is made
2466        # of the node ids suffixed with a slash (see example below). The nodes
2467        # in the subtree of record are the ones where 'parent_path' starts with
2468        # the 'parent_path' of record.
2469        #
2470        #               a                 node | id | parent_path
2471        #              / \                  a  | 42 | 42/
2472        #            ...  b                 b  | 63 | 42/63/
2473        #                / \                c  | 84 | 42/63/84/
2474        #               c   d               d  | 85 | 42/63/85/
2475        #
2476        # Note: the final '/' is necessary to match subtrees correctly: '42/63'
2477        # is a prefix of '42/630', but '42/63/' is not a prefix of '42/630/'.
2478        _logger.info('Computing parent_path for table %s...', self._table)
2479        query = """
2480            WITH RECURSIVE __parent_store_compute(id, parent_path) AS (
2481                SELECT row.id, concat(row.id, '/')
2482                FROM {table} row
2483                WHERE row.{parent} IS NULL
2484            UNION
2485                SELECT row.id, concat(comp.parent_path, row.id, '/')
2486                FROM {table} row, __parent_store_compute comp
2487                WHERE row.{parent} = comp.id
2488            )
2489            UPDATE {table} row SET parent_path = comp.parent_path
2490            FROM __parent_store_compute comp
2491            WHERE row.id = comp.id
2492        """.format(table=self._table, parent=self._parent_name)
2493        self.env.cr.execute(query)
2494        self.invalidate_cache(['parent_path'])
2495        return True
2496
2497    def _check_removed_columns(self, log=False):
2498        # iterate on the database columns to drop the NOT NULL constraints of
2499        # fields which were required but have been removed (or will be added by
2500        # another module)
2501        cr = self._cr
2502        cols = [name for name, field in self._fields.items()
2503                     if field.store and field.column_type]
2504        cr.execute("SELECT a.attname, a.attnotnull"
2505                   "  FROM pg_class c, pg_attribute a"
2506                   " WHERE c.relname=%s"
2507                   "   AND c.oid=a.attrelid"
2508                   "   AND a.attisdropped=%s"
2509                   "   AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2510                   "   AND a.attname NOT IN %s", (self._table, False, tuple(cols))),
2511
2512        for row in cr.dictfetchall():
2513            if log:
2514                _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2515                              row['attname'], self._table, self._name)
2516            if row['attnotnull']:
2517                tools.drop_not_null(cr, self._table, row['attname'])
2518
2519    def _init_column(self, column_name):
2520        """ Initialize the value of the given column for existing rows. """
2521        # get the default value; ideally, we should use default_get(), but it
2522        # fails due to ir.default not being ready
2523        field = self._fields[column_name]
2524        if field.default:
2525            value = field.default(self)
2526            value = field.convert_to_write(value, self)
2527            value = field.convert_to_column(value, self)
2528        else:
2529            value = None
2530        # Write value if non-NULL, except for booleans for which False means
2531        # the same as NULL - this saves us an expensive query on large tables.
2532        necessary = (value is not None) if field.type != 'boolean' else value
2533        if necessary:
2534            _logger.debug("Table '%s': setting default value of new column %s to %r",
2535                          self._table, column_name, value)
2536            query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" IS NULL' % (
2537                self._table, column_name, field.column_format, column_name)
2538            self._cr.execute(query, (value,))
2539
2540    @ormcache()
2541    def _table_has_rows(self):
2542        """ Return whether the model's table has rows. This method should only
2543            be used when updating the database schema (:meth:`~._auto_init`).
2544        """
2545        self.env.cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
2546        return self.env.cr.rowcount
2547
2548    def _auto_init(self):
2549        """ Initialize the database schema of ``self``:
2550            - create the corresponding table,
2551            - create/update the necessary columns/tables for fields,
2552            - initialize new columns on existing rows,
2553            - add the SQL constraints given on the model,
2554            - add the indexes on indexed fields,
2555
2556            Also prepare post-init stuff to:
2557            - add foreign key constraints,
2558            - reflect models, fields, relations and constraints,
2559            - mark fields to recompute on existing records.
2560
2561            Note: you should not override this method. Instead, you can modify
2562            the model's database schema by overriding method :meth:`~.init`,
2563            which is called right after this one.
2564        """
2565        raise_on_invalid_object_name(self._name)
2566
2567        # This prevents anything called by this method (in particular default
2568        # values) from prefetching a field for which the corresponding column
2569        # has not been added in database yet!
2570        self = self.with_context(prefetch_fields=False)
2571
2572        cr = self._cr
2573        update_custom_fields = self._context.get('update_custom_fields', False)
2574        must_create_table = not tools.table_exists(cr, self._table)
2575        parent_path_compute = False
2576
2577        if self._auto:
2578            if must_create_table:
2579                def make_type(field):
2580                    return field.column_type[1] + (" NOT NULL" if field.required else "")
2581
2582                tools.create_model_table(cr, self._table, self._description, [
2583                    (name, make_type(field), field.string)
2584                    for name, field in self._fields.items()
2585                    if name != 'id' and field.store and field.column_type
2586                ])
2587
2588            if self._parent_store:
2589                if not tools.column_exists(cr, self._table, 'parent_path'):
2590                    self._create_parent_columns()
2591                    parent_path_compute = True
2592
2593            if not must_create_table:
2594                self._check_removed_columns(log=False)
2595
2596            # update the database schema for fields
2597            columns = tools.table_columns(cr, self._table)
2598            fields_to_compute = []
2599
2600            for field in self._fields.values():
2601                if not field.store:
2602                    continue
2603                if field.manual and not update_custom_fields:
2604                    continue            # don't update custom fields
2605                new = field.update_db(self, columns)
2606                if new and field.compute:
2607                    fields_to_compute.append(field.name)
2608
2609            if fields_to_compute:
2610                @self.pool.post_init
2611                def mark_fields_to_compute():
2612                    recs = self.with_context(active_test=False).search([], order='id')
2613                    if not recs:
2614                        return
2615                    for field in fields_to_compute:
2616                        _logger.info("Storing computed values of %s.%s", recs._name, field)
2617                        self.env.add_to_compute(recs._fields[field], recs)
2618
2619        if self._auto:
2620            self._add_sql_constraints()
2621
2622        if must_create_table:
2623            self._execute_sql()
2624
2625        if parent_path_compute:
2626            self._parent_store_compute()
2627
2628    def init(self):
2629        """ This method is called after :meth:`~._auto_init`, and may be
2630            overridden to create or modify a model's database schema.
2631        """
2632        pass
2633
2634    def _create_parent_columns(self):
2635        tools.create_column(self._cr, self._table, 'parent_path', 'VARCHAR')
2636        if 'parent_path' not in self._fields:
2637            _logger.error("add a field parent_path on model %s: parent_path = fields.Char(index=True)", self._name)
2638        elif not self._fields['parent_path'].index:
2639            _logger.error('parent_path field on model %s must be indexed! Add index=True to the field definition)', self._name)
2640
2641    def _add_sql_constraints(self):
2642        """
2643
2644        Modify this model's database table constraints so they match the one in
2645        _sql_constraints.
2646
2647        """
2648        cr = self._cr
2649        foreign_key_re = re.compile(r'\s*foreign\s+key\b.*', re.I)
2650
2651        for (key, definition, message) in self._sql_constraints:
2652            conname = '%s_%s' % (self._table, key)
2653            current_definition = tools.constraint_definition(cr, self._table, conname)
2654            if current_definition == definition:
2655                continue
2656
2657            if current_definition:
2658                # constraint exists but its definition may have changed
2659                tools.drop_constraint(cr, self._table, conname)
2660
2661            if foreign_key_re.match(definition):
2662                self.pool.post_init(tools.add_constraint, cr, self._table, conname, definition)
2663            else:
2664                self.pool.post_constraint(tools.add_constraint, cr, self._table, conname, definition)
2665
2666    def _execute_sql(self):
2667        """ Execute the SQL code from the _sql attribute (if any)."""
2668        if hasattr(self, "_sql"):
2669            self._cr.execute(self._sql)
2670
2671    #
2672    # Update objects that uses this one to update their _inherits fields
2673    #
2674
2675    @api.model
2676    def _add_inherited_fields(self):
2677        """ Determine inherited fields. """
2678        if not self._inherits:
2679            return
2680
2681        # determine which fields can be inherited
2682        to_inherit = {
2683            name: (parent_fname, field)
2684            for parent_model_name, parent_fname in self._inherits.items()
2685            for name, field in self.env[parent_model_name]._fields.items()
2686        }
2687
2688        # add inherited fields that are not redefined locally
2689        for name, (parent_fname, field) in to_inherit.items():
2690            if name not in self._fields:
2691                # inherited fields are implemented as related fields, with the
2692                # following specific properties:
2693                #  - reading inherited fields should not bypass access rights
2694                #  - copy inherited fields iff their original field is copied
2695                self._add_field(name, field.new(
2696                    inherited=True,
2697                    inherited_field=field,
2698                    related=(parent_fname, name),
2699                    related_sudo=False,
2700                    copy=field.copy,
2701                    readonly=field.readonly,
2702                ))
2703
2704    @api.model
2705    def _inherits_check(self):
2706        for table, field_name in self._inherits.items():
2707            field = self._fields.get(field_name)
2708            if not field:
2709                _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
2710                from .fields import Many2one
2711                field = Many2one(table, string="Automatically created field to link to parent %s" % table, required=True, ondelete="cascade")
2712                self._add_field(field_name, field)
2713            elif not field.required or field.ondelete.lower() not in ("cascade", "restrict"):
2714                _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
2715                field.required = True
2716                field.ondelete = "cascade"
2717            field.delegate = True
2718
2719        # reflect fields with delegate=True in dictionary self._inherits
2720        for field in self._fields.values():
2721            if field.type == 'many2one' and not field.related and field.delegate:
2722                if not field.required:
2723                    _logger.warning("Field %s with delegate=True must be required.", field)
2724                    field.required = True
2725                if field.ondelete.lower() not in ('cascade', 'restrict'):
2726                    field.ondelete = 'cascade'
2727                self._inherits[field.comodel_name] = field.name
2728                self.pool[field.comodel_name]._inherits_children.add(self._name)
2729
2730    @api.model
2731    def _prepare_setup(self):
2732        """ Prepare the setup of the model. """
2733        cls = type(self)
2734        cls._setup_done = False
2735
2736        # the classes that define this model's base fields and methods
2737        cls._model_classes = tuple(c for c in cls.mro() if getattr(c, 'pool', None) is None)
2738
2739        # reset those attributes on the model's class for _setup_fields() below
2740        for attr in ('_rec_name', '_active_name'):
2741            try:
2742                delattr(cls, attr)
2743            except AttributeError:
2744                pass
2745
2746    @api.model
2747    def _setup_base(self):
2748        """ Determine the inherited and custom fields of the model. """
2749        cls = type(self)
2750        if cls._setup_done:
2751            return
2752
2753        # 1. determine the proper fields of the model: the fields defined on the
2754        # class and magic fields, not the inherited or custom ones
2755        cls0 = cls.pool.model_cache.get(cls._model_classes)
2756
2757        if cls0 and cls0._model_classes == cls._model_classes:
2758            # cls0 is either a model class from another registry, or cls itself.
2759            # The point is that it has the same base classes. We retrieve stuff
2760            # from cls0 to optimize the setup of cls. cls0 is guaranteed to be
2761            # properly set up: registries are loaded under a global lock,
2762            # therefore two registries are never set up at the same time.
2763
2764            # remove fields that are not proper to cls
2765            for name in set(cls._fields).difference(cls0._model_fields):
2766                delattr(cls, name)
2767                del cls._fields[name]
2768
2769            if cls0 is cls:
2770                # simply reset up fields
2771                for name, field in cls._fields.items():
2772                    field.setup_base(self, name)
2773            else:
2774                # collect proper fields on cls0, and add them on cls
2775                for name in cls0._model_fields:
2776                    field = cls0._fields[name]
2777                    # regular fields are shared, while related fields are setup from scratch
2778                    if not field.related:
2779                        self._add_field(name, field)
2780                    else:
2781                        self._add_field(name, field.new(**field.args))
2782                cls._model_fields = list(cls._fields)
2783
2784        else:
2785            # retrieve fields from parent classes, and duplicate them on cls to
2786            # avoid clashes with inheritance between different models
2787            for name in cls._fields:
2788                delattr(cls, name)
2789            cls._fields = OrderedDict()
2790            for name, field in sorted(getmembers(cls, Field.__instancecheck__), key=lambda f: f[1]._sequence):
2791                # do not retrieve magic, custom and inherited fields
2792                if not any(field.args.get(k) for k in ('automatic', 'manual', 'inherited')):
2793                    self._add_field(name, field.new())
2794            self._add_magic_fields()
2795            cls._model_fields = list(cls._fields)
2796
2797        cls.pool.model_cache[cls._model_classes] = cls
2798
2799        # 2. add manual fields
2800        if self.pool._init_modules:
2801            self.env['ir.model.fields']._add_manual_fields(self)
2802
2803        # 3. make sure that parent models determine their own fields, then add
2804        # inherited fields to cls
2805        self._inherits_check()
2806        for parent in self._inherits:
2807            self.env[parent]._setup_base()
2808        self._add_inherited_fields()
2809
2810        # 4. initialize more field metadata
2811        cls._field_inverses = Collector()   # inverse fields for related fields
2812
2813        cls._setup_done = True
2814
2815        # 5. determine and validate rec_name
2816        if cls._rec_name:
2817            assert cls._rec_name in cls._fields, \
2818                "Invalid _rec_name=%r for model %r" % (cls._rec_name, cls._name)
2819        elif 'name' in cls._fields:
2820            cls._rec_name = 'name'
2821        elif cls._custom and 'x_name' in cls._fields:
2822            cls._rec_name = 'x_name'
2823
2824        # 6. determine and validate active_name
2825        if cls._active_name:
2826            assert (cls._active_name in cls._fields
2827                    and cls._active_name in ('active', 'x_active')), \
2828                ("Invalid _active_name=%r for model %r; only 'active' and "
2829                "'x_active' are supported and the field must be present on "
2830                "the model") % (cls._active_name, cls._name)
2831        elif 'active' in cls._fields:
2832            cls._active_name = 'active'
2833        elif 'x_active' in cls._fields:
2834            cls._active_name = 'x_active'
2835
2836    @api.model
2837    def _setup_fields(self):
2838        """ Setup the fields, except for recomputation triggers. """
2839        cls = type(self)
2840
2841        # set up fields
2842        bad_fields = []
2843        for name, field in cls._fields.items():
2844            try:
2845                field.setup_full(self)
2846            except Exception:
2847                if field.base_field.manual:
2848                    # Something goes wrong when setup a manual field.
2849                    # This can happen with related fields using another manual many2one field
2850                    # that hasn't been loaded because the comodel does not exist yet.
2851                    # This can also be a manual function field depending on not loaded fields yet.
2852                    bad_fields.append(name)
2853                    continue
2854                raise
2855
2856        for name in bad_fields:
2857            self._pop_field(name)
2858
2859    @api.model
2860    def _setup_complete(self):
2861        """ Setup recomputation triggers, and complete the model setup. """
2862        cls = type(self)
2863
2864        # register constraints and onchange methods
2865        cls._init_constraints_onchanges()
2866
2867    @api.model
2868    def fields_get(self, allfields=None, attributes=None):
2869        """ fields_get([fields][, attributes])
2870
2871        Return the definition of each field.
2872
2873        The returned value is a dictionary (indexed by field name) of
2874        dictionaries. The _inherits'd fields are included. The string, help,
2875        and selection (if present) attributes are translated.
2876
2877        :param allfields: list of fields to document, all if empty or not provided
2878        :param attributes: list of description attributes to return for each field, all if empty or not provided
2879        """
2880        has_access = functools.partial(self.check_access_rights, raise_exception=False)
2881        readonly = not (has_access('write') or has_access('create'))
2882
2883        res = {}
2884        for fname, field in self._fields.items():
2885            if allfields and fname not in allfields:
2886                continue
2887            if field.groups and not self.env.su and not self.user_has_groups(field.groups):
2888                continue
2889
2890            description = field.get_description(self.env)
2891            if readonly:
2892                description['readonly'] = True
2893                description['states'] = {}
2894            if attributes:
2895                description = {key: val
2896                               for key, val in description.items()
2897                               if key in attributes}
2898            res[fname] = description
2899
2900        return res
2901
2902    @api.model
2903    def get_empty_list_help(self, help):
2904        """ Generic method giving the help message displayed when having
2905            no result to display in a list or kanban view. By default it returns
2906            the help given in parameter that is generally the help message
2907            defined in the action.
2908        """
2909        return help
2910
2911    @api.model
2912    def check_field_access_rights(self, operation, fields):
2913        """
2914        Check the user access rights on the given fields. This raises Access
2915        Denied if the user does not have the rights. Otherwise it returns the
2916        fields (as is if the fields is not falsy, or the readable/writable
2917        fields if fields is falsy).
2918        """
2919        if self.env.su:
2920            return fields or list(self._fields)
2921
2922        def valid(fname):
2923            """ determine whether user has access to field ``fname`` """
2924            field = self._fields.get(fname)
2925            if field and field.groups:
2926                return self.user_has_groups(field.groups)
2927            else:
2928                return True
2929
2930        if not fields:
2931            fields = [name for name in self._fields if valid(name)]
2932        else:
2933            invalid_fields = {name for name in fields if not valid(name)}
2934            if invalid_fields:
2935                _logger.info('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
2936                             operation, self._uid, self._name, ', '.join(invalid_fields))
2937
2938                description = self.env['ir.model']._get(self._name).name
2939                if not self.env.user.has_group('base.group_no_one'):
2940                    raise AccessError(
2941                        _('You do not have enough rights to access the fields "%(fields)s" on %(document_kind)s (%(document_model)s). '\
2942                          'Please contact your system administrator.\n\n(Operation: %(operation)s)') % {
2943                        'fields': ','.join(list(invalid_fields)),
2944                        'document_kind': description,
2945                        'document_model': self._name,
2946                        'operation': operation,
2947                    })
2948
2949                def format_groups(field):
2950                    if field.groups == '.':
2951                        return _("always forbidden")
2952
2953                    anyof = self.env['res.groups']
2954                    noneof = self.env['res.groups']
2955                    for g in field.groups.split(','):
2956                        if g.startswith('!'):
2957                            noneof |= self.env.ref(g[1:])
2958                        else:
2959                            anyof |= self.env.ref(g)
2960                    strs = []
2961                    if anyof:
2962                        strs.append(_("allowed for groups %s") % ', '.join(
2963                            anyof.sorted(lambda g: g.id)
2964                                 .mapped(lambda g: repr(g.display_name))
2965                        ))
2966                    if noneof:
2967                        strs.append(_("forbidden for groups %s") % ', '.join(
2968                            noneof.sorted(lambda g: g.id)
2969                                  .mapped(lambda g: repr(g.display_name))
2970                        ))
2971                    return '; '.join(strs)
2972
2973                raise AccessError(_("""The requested operation can not be completed due to security restrictions.
2974
2975Document type: %(document_kind)s (%(document_model)s)
2976Operation: %(operation)s
2977User: %(user)s
2978Fields:
2979%(fields_list)s""") % {
2980                    'document_model': self._name,
2981                    'document_kind': description or self._name,
2982                    'operation': operation,
2983                    'user': self._uid,
2984                    'fields_list': '\n'.join(
2985                        '- %s (%s)' % (f, format_groups(self._fields[f]))
2986                        for f in sorted(invalid_fields)
2987                    )
2988                })
2989
2990        return fields
2991
2992    def read(self, fields=None, load='_classic_read'):
2993        """ read([fields])
2994
2995        Reads the requested fields for the records in ``self``, low-level/RPC
2996        method. In Python code, prefer :meth:`~.browse`.
2997
2998        :param fields: list of field names to return (default is all fields)
2999        :return: a list of dictionaries mapping field names to their values,
3000                 with one dictionary per record
3001        :raise AccessError: if user has no read rights on some of the given
3002                records
3003        """
3004        fields = self.check_field_access_rights('read', fields)
3005
3006        # fetch stored fields from the database to the cache
3007        stored_fields = set()
3008        for name in fields:
3009            field = self._fields.get(name)
3010            if not field:
3011                raise ValueError("Invalid field %r on model %r" % (name, self._name))
3012            if field.store:
3013                stored_fields.add(name)
3014            elif field.compute:
3015                # optimization: prefetch direct field dependencies
3016                for dotname in field.depends:
3017                    f = self._fields[dotname.split('.')[0]]
3018                    if f.prefetch and (not f.groups or self.user_has_groups(f.groups)):
3019                        stored_fields.add(f.name)
3020        self._read(stored_fields)
3021
3022        return self._read_format(fnames=fields, load=load)
3023
3024    def _read_format(self, fnames, load='_classic_read'):
3025        """Returns a list of dictionaries mapping field names to their values,
3026        with one dictionary per record that exists.
3027
3028        The output format is similar to the one expected from the `read` method.
3029
3030        The current method is different from `read` because it retrieves its
3031        values from the cache without doing a query when it is avoidable.
3032        """
3033        data = [(record, {'id': record._ids[0]}) for record in self]
3034        use_name_get = (load == '_classic_read')
3035        for name in fnames:
3036            convert = self._fields[name].convert_to_read
3037            for record, vals in data:
3038                # missing records have their vals empty
3039                if not vals:
3040                    continue
3041                try:
3042                    vals[name] = convert(record[name], record, use_name_get)
3043                except MissingError:
3044                    vals.clear()
3045        result = [vals for record, vals in data if vals]
3046
3047        return result
3048
3049    def _fetch_field(self, field):
3050        """ Read from the database in order to fetch ``field`` (:class:`Field`
3051            instance) for ``self`` in cache.
3052        """
3053        self.check_field_access_rights('read', [field.name])
3054        # determine which fields can be prefetched
3055        if self._context.get('prefetch_fields', True) and field.prefetch:
3056            fnames = [
3057                name
3058                for name, f in self._fields.items()
3059                # select fields that can be prefetched
3060                if f.prefetch
3061                # discard fields with groups that the user may not access
3062                if not (f.groups and not self.user_has_groups(f.groups))
3063                # discard fields that must be recomputed
3064                if not (f.compute and self.env.records_to_compute(f))
3065            ]
3066            if field.name not in fnames:
3067                fnames.append(field.name)
3068                self = self - self.env.records_to_compute(field)
3069        else:
3070            fnames = [field.name]
3071        self._read(fnames)
3072
3073    def _read(self, fields):
3074        """ Read the given fields of the records in ``self`` from the database,
3075            and store them in cache. Access errors are also stored in cache.
3076            Skip fields that are not stored.
3077
3078            :param field_names: list of column names of model ``self``; all those
3079                fields are guaranteed to be read
3080            :param inherited_field_names: list of column names from parent
3081                models; some of those fields may not be read
3082        """
3083        if not self:
3084            return
3085        self.check_access_rights('read')
3086
3087        # if a read() follows a write(), we must flush updates, as read() will
3088        # fetch from database and overwrites the cache (`test_update_with_id`)
3089        self.flush(fields, self)
3090
3091        field_names = []
3092        inherited_field_names = []
3093        for name in fields:
3094            field = self._fields.get(name)
3095            if field:
3096                if field.store:
3097                    field_names.append(name)
3098                elif field.base_field.store:
3099                    inherited_field_names.append(name)
3100            else:
3101                _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3102
3103        # determine the fields that are stored as columns in tables; ignore 'id'
3104        fields_pre = [
3105            field
3106            for field in (self._fields[name] for name in field_names + inherited_field_names)
3107            if field.name != 'id'
3108            if field.base_field.store and field.base_field.column_type
3109            if not (field.inherited and callable(field.base_field.translate))
3110        ]
3111
3112        if fields_pre:
3113            env = self.env
3114            cr, user, context, su = env.args
3115
3116            # make a query object for selecting ids, and apply security rules to it
3117            query = Query(self.env.cr, self._table, self._table_query)
3118            self._apply_ir_rules(query, 'read')
3119
3120            # the query may involve several tables: we need fully-qualified names
3121            def qualify(field):
3122                col = field.name
3123                res = self._inherits_join_calc(self._table, field.name, query)
3124                if field.type == 'binary' and (context.get('bin_size') or context.get('bin_size_' + col)):
3125                    # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3126                    res = 'pg_size_pretty(length(%s)::bigint)' % res
3127                return '%s as "%s"' % (res, col)
3128
3129            # selected fields are: 'id' followed by fields_pre
3130            qual_names = [qualify(name) for name in [self._fields['id']] + fields_pre]
3131
3132            # determine the actual query to execute (last parameter is added below)
3133            query.add_where('"%s".id IN %%s' % self._table)
3134            query_str, params = query.select(*qual_names)
3135
3136            result = []
3137            for sub_ids in cr.split_for_in_conditions(self.ids):
3138                cr.execute(query_str, params + [sub_ids])
3139                result += cr.fetchall()
3140        else:
3141            self.check_access_rule('read')
3142            result = [(id_,) for id_ in self.ids]
3143
3144        fetched = self.browse()
3145        if result:
3146            cols = zip(*result)
3147            ids = next(cols)
3148            fetched = self.browse(ids)
3149
3150            for field in fields_pre:
3151                values = next(cols)
3152                if context.get('lang') and not field.inherited and callable(field.translate):
3153                    translate = field.get_trans_func(fetched)
3154                    values = list(values)
3155                    for index in range(len(ids)):
3156                        values[index] = translate(ids[index], values[index])
3157
3158                # store values in cache
3159                self.env.cache.update(fetched, field, values)
3160
3161            # determine the fields that must be processed now;
3162            # for the sake of simplicity, we ignore inherited fields
3163            for name in field_names:
3164                field = self._fields[name]
3165                if not field.column_type:
3166                    field.read(fetched)
3167                if field.deprecated:
3168                    _logger.warning('Field %s is deprecated: %s', field, field.deprecated)
3169
3170        # possibly raise exception for the records that could not be read
3171        missing = self - fetched
3172        if missing:
3173            extras = fetched - self
3174            if extras:
3175                raise AccessError(
3176                    _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3177                        missing._ids, extras._ids,
3178                    ))
3179            # mark non-existing records in missing
3180            forbidden = missing.exists()
3181            if forbidden:
3182                raise self.env['ir.rule']._make_access_error('read', forbidden)
3183
3184    def get_metadata(self):
3185        """Return some metadata about the given records.
3186
3187        :return: list of ownership dictionaries for each requested record
3188        :rtype: list of dictionaries with the following keys:
3189
3190            * id: object id
3191            * create_uid: user who created the record
3192            * create_date: date when the record was created
3193            * write_uid: last user who changed the record
3194            * write_date: date of the last change to the record
3195            * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
3196            * noupdate: A boolean telling if the record will be updated or not
3197        """
3198
3199        IrModelData = self.env['ir.model.data'].sudo()
3200        if self._log_access:
3201            res = self.sudo().read(LOG_ACCESS_COLUMNS)
3202        else:
3203            res = [{'id': x} for x in self.ids]
3204        xml_data = dict((x['res_id'], x) for x in IrModelData.search_read([('model', '=', self._name),
3205                                                                           ('res_id', 'in', self.ids)],
3206                                                                          ['res_id', 'noupdate', 'module', 'name'],
3207                                                                          order='id',
3208                                                                          limit=1))
3209        for r in res:
3210            value = xml_data.get(r['id'], {})
3211            r['xmlid'] = '%(module)s.%(name)s' % value if value else False
3212            r['noupdate'] = value.get('noupdate', False)
3213        return res
3214
3215    def get_base_url(self):
3216        """
3217        Returns rooturl for a specific given record.
3218
3219        By default, it return the ir.config.parameter of base_url
3220        but it can be overidden by model.
3221
3222        :return: the base url for this record
3223        :rtype: string
3224
3225        """
3226        self.ensure_one()
3227        return self.env['ir.config_parameter'].sudo().get_param('web.base.url')
3228
3229    def _check_concurrency(self):
3230        if not (self._log_access and self._context.get(self.CONCURRENCY_CHECK_FIELD)):
3231            return
3232        check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3233        for sub_ids in self._cr.split_for_in_conditions(self.ids):
3234            nclauses = 0
3235            params = []
3236            for id in sub_ids:
3237                id_ref = "%s,%s" % (self._name, id)
3238                update_date = self._context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3239                if update_date:
3240                    nclauses += 1
3241                    params.extend([id, update_date])
3242            if not nclauses:
3243                continue
3244            query = "SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause] * nclauses))
3245            self._cr.execute(query, tuple(params))
3246            res = self._cr.fetchone()
3247            if res:
3248                # mention the first one only to keep the error message readable
3249                raise ValidationError(_('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3250
3251    def _check_company(self, fnames=None):
3252        """ Check the companies of the values of the given field names.
3253
3254        :param list fnames: names of relational fields to check
3255        :raises UserError: if the `company_id` of the value of any field is not
3256            in `[False, self.company_id]` (or `self` if
3257            :class:`~odoo.addons.base.models.res_company`).
3258
3259        For :class:`~odoo.addons.base.models.res_users` relational fields,
3260        verifies record company is in `company_ids` fields.
3261
3262        User with main company A, having access to company A and B, could be
3263        assigned or linked to records in company B.
3264        """
3265        if fnames is None:
3266            fnames = self._fields
3267
3268        regular_fields = []
3269        property_fields = []
3270        for name in fnames:
3271            field = self._fields[name]
3272            if field.relational and field.check_company and \
3273                    'company_id' in self.env[field.comodel_name]:
3274                if not field.company_dependent:
3275                    regular_fields.append(name)
3276                else:
3277                    property_fields.append(name)
3278
3279        if not (regular_fields or property_fields):
3280            return
3281
3282        inconsistencies = []
3283        for record in self:
3284            company = record.company_id if record._name != 'res.company' else record
3285            # The first part of the check verifies that all records linked via relation fields are compatible
3286            # with the company of the origin document, i.e. `self.account_id.company_id == self.company_id`
3287            for name in regular_fields:
3288                corecord = record.sudo()[name]
3289                # Special case with `res.users` since an user can belong to multiple companies.
3290                if corecord._name == 'res.users' and corecord.company_ids:
3291                    if not (company <= corecord.company_ids):
3292                        inconsistencies.append((record, name, corecord))
3293                elif not (corecord.company_id <= company):
3294                    inconsistencies.append((record, name, corecord))
3295            # The second part of the check (for property / company-dependent fields) verifies that the records
3296            # linked via those relation fields are compatible with the company that owns the property value, i.e.
3297            # the company for which the value is being assigned, i.e:
3298            #      `self.property_account_payable_id.company_id == self.env.company
3299            company = self.env.company
3300            for name in property_fields:
3301                # Special case with `res.users` since an user can belong to multiple companies.
3302                corecord = record.sudo()[name]
3303                if corecord._name == 'res.users' and corecord.company_ids:
3304                    if not (company <= corecord.company_ids):
3305                        inconsistencies.append((record, name, corecord))
3306                elif not (corecord.company_id <= company):
3307                    inconsistencies.append((record, name, corecord))
3308
3309        if inconsistencies:
3310            lines = [_("Incompatible companies on records:")]
3311            company_msg = _("- Record is company %(company)r and %(field)r (%(fname)s: %(values)s) belongs to another company.")
3312            record_msg = _("- %(record)r belongs to company %(company)r and %(field)r (%(fname)s: %(values)s) belongs to another company.")
3313            for record, name, corecords in inconsistencies[:5]:
3314                if record._name == 'res.company':
3315                    msg, company = company_msg, record
3316                else:
3317                    msg, company = record_msg, record.company_id
3318                field = self.env['ir.model.fields']._get(self._name, name)
3319                lines.append(msg % {
3320                    'record': record.display_name,
3321                    'company': company.display_name,
3322                    'field': field.field_description,
3323                    'fname': field.name,
3324                    'values': ", ".join(repr(rec.display_name) for rec in corecords),
3325                })
3326            raise UserError("\n".join(lines))
3327
3328    @api.model
3329    def check_access_rights(self, operation, raise_exception=True):
3330        """ Verifies that the operation given by ``operation`` is allowed for
3331            the current user according to the access rights.
3332        """
3333        return self.env['ir.model.access'].check(self._name, operation, raise_exception)
3334
3335    def check_access_rule(self, operation):
3336        """ Verifies that the operation given by ``operation`` is allowed for
3337            the current user according to ir.rules.
3338
3339           :param operation: one of ``write``, ``unlink``
3340           :raise UserError: * if current ir.rules do not permit this operation.
3341           :return: None if the operation is allowed
3342        """
3343        if self.env.su:
3344            return
3345
3346        # SQL Alternative if computing in-memory is too slow for large dataset
3347        # invalid = self - self._filter_access_rules(operation)
3348        invalid = self - self._filter_access_rules_python(operation)
3349        if not invalid:
3350            return
3351
3352        forbidden = invalid.exists()
3353        if forbidden:
3354            # the invalid records are (partially) hidden by access rules
3355            raise self.env['ir.rule']._make_access_error(operation, forbidden)
3356
3357        # If we get here, the invalid records are not in the database.
3358        if operation in ('read', 'unlink'):
3359            # No need to warn about deleting an already deleted record.
3360            # And no error when reading a record that was deleted, to prevent spurious
3361            # errors for non-transactional search/read sequences coming from clients.
3362            return
3363        _logger.info('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, self._uid, self._name)
3364        raise MissingError(
3365            _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3366            + '\n\n({} {}, {} {}, {} {}, {} {})'.format(
3367                _('Document type:'), self._name, _('Operation:'), operation,
3368                _('Records:'), invalid.ids[:6], _('User:'), self._uid,
3369            )
3370        )
3371
3372    def _filter_access_rules(self, operation):
3373        """ Return the subset of ``self`` for which ``operation`` is allowed. """
3374        if self.env.su:
3375            return self
3376
3377        if not self._ids:
3378            return self
3379
3380        query = Query(self.env.cr, self._table, self._table_query)
3381        self._apply_ir_rules(query, operation)
3382        if not query.where_clause:
3383            return self
3384
3385        # detemine ids in database that satisfy ir.rules
3386        valid_ids = set()
3387        query.add_where(f'"{self._table}".id IN %s')
3388        query_str, params = query.select()
3389        self._flush_search([])
3390        for sub_ids in self._cr.split_for_in_conditions(self.ids):
3391            self._cr.execute(query_str, params + [sub_ids])
3392            valid_ids.update(row[0] for row in self._cr.fetchall())
3393
3394        # return new ids without origin and ids with origin in valid_ids
3395        return self.browse([
3396            it
3397            for it in self._ids
3398            if not (it or it.origin) or (it or it.origin) in valid_ids
3399        ])
3400
3401    def _filter_access_rules_python(self, operation):
3402        dom = self.env['ir.rule']._compute_domain(self._name, operation)
3403        return self.sudo().filtered_domain(dom or [])
3404
3405    def unlink(self):
3406        """ unlink()
3407
3408        Deletes the records of the current set
3409
3410        :raise AccessError: * if user has no unlink rights on the requested object
3411                            * if user tries to bypass access rules for unlink on the requested object
3412        :raise UserError: if the record is default property for other records
3413
3414        """
3415        if not self:
3416            return True
3417
3418        self.check_access_rights('unlink')
3419        self._check_concurrency()
3420
3421        # mark fields that depend on 'self' to recompute them after 'self' has
3422        # been deleted (like updating a sum of lines after deleting one line)
3423        self.flush()
3424        self.modified(self._fields, before=True)
3425
3426        with self.env.norecompute():
3427            self.check_access_rule('unlink')
3428
3429            cr = self._cr
3430            Data = self.env['ir.model.data'].sudo().with_context({})
3431            Defaults = self.env['ir.default'].sudo()
3432            Property = self.env['ir.property'].sudo()
3433            Attachment = self.env['ir.attachment'].sudo()
3434            ir_model_data_unlink = Data
3435            ir_attachment_unlink = Attachment
3436
3437            # TOFIX: this avoids an infinite loop when trying to recompute a
3438            # field, which triggers the recomputation of another field using the
3439            # same compute function, which then triggers again the computation
3440            # of those two fields
3441            for field in self._fields.values():
3442                self.env.remove_to_compute(field, self)
3443
3444            for sub_ids in cr.split_for_in_conditions(self.ids):
3445                # Check if the records are used as default properties.
3446                refs = ['%s,%s' % (self._name, i) for i in sub_ids]
3447                if Property.search([('res_id', '=', False), ('value_reference', 'in', refs)], limit=1):
3448                    raise UserError(_('Unable to delete this document because it is used as a default property'))
3449
3450                # Delete the records' properties.
3451                Property.search([('res_id', 'in', refs)]).unlink()
3452
3453                query = "DELETE FROM %s WHERE id IN %%s" % self._table
3454                cr.execute(query, (sub_ids,))
3455
3456                # Removing the ir_model_data reference if the record being deleted
3457                # is a record created by xml/csv file, as these are not connected
3458                # with real database foreign keys, and would be dangling references.
3459                #
3460                # Note: the following steps are performed as superuser to avoid
3461                # access rights restrictions, and with no context to avoid possible
3462                # side-effects during admin calls.
3463                data = Data.search([('model', '=', self._name), ('res_id', 'in', sub_ids)])
3464                if data:
3465                    ir_model_data_unlink |= data
3466
3467                # For the same reason, remove the defaults having some of the
3468                # records as value
3469                Defaults.discard_records(self.browse(sub_ids))
3470
3471                # For the same reason, remove the relevant records in ir_attachment
3472                # (the search is performed with sql as the search method of
3473                # ir_attachment is overridden to hide attachments of deleted
3474                # records)
3475                query = 'SELECT id FROM ir_attachment WHERE res_model=%s AND res_id IN %s'
3476                cr.execute(query, (self._name, sub_ids))
3477                attachments = Attachment.browse([row[0] for row in cr.fetchall()])
3478                if attachments:
3479                    ir_attachment_unlink |= attachments.sudo()
3480
3481            # invalidate the *whole* cache, since the orm does not handle all
3482            # changes made in the database, like cascading delete!
3483            self.invalidate_cache()
3484            if ir_model_data_unlink:
3485                ir_model_data_unlink.unlink()
3486            if ir_attachment_unlink:
3487                ir_attachment_unlink.unlink()
3488            # DLE P93: flush after the unlink, for recompute fields depending on
3489            # the modified of the unlink
3490            self.flush()
3491
3492        # auditing: deletions are infrequent and leave no trace in the database
3493        _unlink.info('User #%s deleted %s records with IDs: %r', self._uid, self._name, self.ids)
3494
3495        return True
3496
3497    def write(self, vals):
3498        """ write(vals)
3499
3500        Updates all records in the current set with the provided values.
3501
3502        :param dict vals: fields to update and the value to set on them e.g::
3503
3504                {'foo': 1, 'bar': "Qux"}
3505
3506            will set the field ``foo`` to ``1`` and the field ``bar`` to
3507            ``"Qux"`` if those are valid (otherwise it will trigger an error).
3508
3509        :raise AccessError: * if user has no write rights on the requested object
3510                            * if user tries to bypass access rules for write on the requested object
3511        :raise ValidationError: if user tries to enter invalid value for a field that is not in selection
3512        :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
3513
3514        * For numeric fields (:class:`~odoo.fields.Integer`,
3515          :class:`~odoo.fields.Float`) the value should be of the
3516          corresponding type
3517        * For :class:`~odoo.fields.Boolean`, the value should be a
3518          :class:`python:bool`
3519        * For :class:`~odoo.fields.Selection`, the value should match the
3520          selection values (generally :class:`python:str`, sometimes
3521          :class:`python:int`)
3522        * For :class:`~odoo.fields.Many2one`, the value should be the
3523          database identifier of the record to set
3524        * Other non-relational fields use a string for value
3525
3526          .. danger::
3527
3528              for historical and compatibility reasons,
3529              :class:`~odoo.fields.Date` and
3530              :class:`~odoo.fields.Datetime` fields use strings as values
3531              (written and read) rather than :class:`~python:datetime.date` or
3532              :class:`~python:datetime.datetime`. These date strings are
3533              UTC-only and formatted according to
3534              :const:`odoo.tools.misc.DEFAULT_SERVER_DATE_FORMAT` and
3535              :const:`odoo.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT`
3536        * .. _openerp/models/relationals/format:
3537
3538          :class:`~odoo.fields.One2many` and
3539          :class:`~odoo.fields.Many2many` use a special "commands" format to
3540          manipulate the set of records stored in/associated with the field.
3541
3542          This format is a list of triplets executed sequentially, where each
3543          triplet is a command to execute on the set of records. Not all
3544          commands apply in all situations. Possible commands are:
3545
3546          ``(0, 0, values)``
3547              adds a new record created from the provided ``value`` dict.
3548          ``(1, id, values)``
3549              updates an existing record of id ``id`` with the values in
3550              ``values``. Can not be used in :meth:`~.create`.
3551          ``(2, id, 0)``
3552              removes the record of id ``id`` from the set, then deletes it
3553              (from the database). Can not be used in :meth:`~.create`.
3554          ``(3, id, 0)``
3555              removes the record of id ``id`` from the set, but does not
3556              delete it. Can not be used in
3557              :meth:`~.create`.
3558          ``(4, id, 0)``
3559              adds an existing record of id ``id`` to the set.
3560          ``(5, 0, 0)``
3561              removes all records from the set, equivalent to using the
3562              command ``3`` on every record explicitly. Can not be used in
3563              :meth:`~.create`.
3564          ``(6, 0, ids)``
3565              replaces all existing records in the set by the ``ids`` list,
3566              equivalent to using the command ``5`` followed by a command
3567              ``4`` for each ``id`` in ``ids``.
3568        """
3569        if not self:
3570            return True
3571
3572        self.check_access_rights('write')
3573        self.check_field_access_rights('write', vals.keys())
3574        self.check_access_rule('write')
3575        env = self.env
3576
3577        bad_names = {'id', 'parent_path'}
3578        if self._log_access:
3579            # the superuser can set log_access fields while loading registry
3580            if not(self.env.uid == SUPERUSER_ID and not self.pool.ready):
3581                bad_names.update(LOG_ACCESS_COLUMNS)
3582
3583        determine_inverses = defaultdict(list)      # {inverse: fields}
3584        records_to_inverse = {}                     # {field: records}
3585        relational_names = []
3586        protected = set()
3587        check_company = False
3588        for fname in vals:
3589            field = self._fields.get(fname)
3590            if not field:
3591                raise ValueError("Invalid field %r on model %r" % (fname, self._name))
3592            if field.inverse:
3593                if field.type in ('one2many', 'many2many'):
3594                    # The written value is a list of commands that must applied
3595                    # on the field's current value. Because the field is
3596                    # protected while being written, the field's current value
3597                    # will not be computed and default to an empty recordset. So
3598                    # make sure the field's value is in cache before writing, in
3599                    # order to avoid an inconsistent update.
3600                    self[fname]
3601                determine_inverses[field.inverse].append(field)
3602                # DLE P150: `test_cancel_propagation`, `test_manufacturing_3_steps`, `test_manufacturing_flow`
3603                # TODO: check whether still necessary
3604                records_to_inverse[field] = self.filtered('id')
3605            if field.relational or self._field_inverses[field]:
3606                relational_names.append(fname)
3607            if field.inverse or (field.compute and not field.readonly):
3608                if field.store or field.type not in ('one2many', 'many2many'):
3609                    # Protect the field from being recomputed while being
3610                    # inversed. In the case of non-stored x2many fields, the
3611                    # field's value may contain unexpeced new records (created
3612                    # by command 0). Those new records are necessary for
3613                    # inversing the field, but should no longer appear if the
3614                    # field is recomputed afterwards. Not protecting the field
3615                    # will automatically invalidate the field from the cache,
3616                    # forcing its value to be recomputed once dependencies are
3617                    # up-to-date.
3618                    protected.update(self.pool.field_computed.get(field, [field]))
3619            if fname == 'company_id' or (field.relational and field.check_company):
3620                check_company = True
3621
3622        # protect fields being written against recomputation
3623        with env.protecting(protected, self):
3624            # Determine records depending on values. When modifying a relational
3625            # field, you have to recompute what depends on the field's values
3626            # before and after modification.  This is because the modification
3627            # has an impact on the "data path" between a computed field and its
3628            # dependency.  Note that this double call to modified() is only
3629            # necessary for relational fields.
3630            #
3631            # It is best explained with a simple example: consider two sales
3632            # orders SO1 and SO2.  The computed total amount on sales orders
3633            # indirectly depends on the many2one field 'order_id' linking lines
3634            # to their sales order.  Now consider the following code:
3635            #
3636            #   line = so1.line_ids[0]      # pick a line from SO1
3637            #   line.order_id = so2         # move the line to SO2
3638            #
3639            # In this situation, the total amount must be recomputed on *both*
3640            # sales order: the line's order before the modification, and the
3641            # line's order after the modification.
3642            self.modified(relational_names, before=True)
3643
3644            real_recs = self.filtered('id')
3645
3646            # If there are only fields that do not trigger _write (e.g. only
3647            # determine inverse), the below ensures that `write_date` and
3648            # `write_uid` are updated (`test_orm.py`, `test_write_date`)
3649            if self._log_access and self.ids:
3650                towrite = env.all.towrite[self._name]
3651                for record in real_recs:
3652                    towrite[record.id]['write_uid'] = self.env.uid
3653                    towrite[record.id]['write_date'] = False
3654                self.env.cache.invalidate([
3655                    (self._fields['write_date'], self.ids),
3656                    (self._fields['write_uid'], self.ids),
3657                ])
3658
3659            # for monetary field, their related currency field must be cached
3660            # before the amount so it can be rounded correctly
3661            for fname in sorted(vals, key=lambda x: self._fields[x].type=='monetary'):
3662                if fname in bad_names:
3663                    continue
3664                field = self._fields[fname]
3665                field.write(self, vals[fname])
3666
3667            # determine records depending on new values
3668            #
3669            # Call modified after write, because the modified can trigger a
3670            # search which can trigger a flush which can trigger a recompute
3671            # which remove the field from the recompute list while all the
3672            # values required for the computation could not be yet in cache.
3673            # e.g. Write on `name` of `res.partner` trigger the recompute of
3674            # `display_name`, which triggers a search on child_ids to find the
3675            # childs to which the display_name must be recomputed, which
3676            # triggers the flush of `display_name` because the _order of
3677            # res.partner includes display_name. The computation of display_name
3678            # is then done too soon because the parent_id was not yet written.
3679            # (`test_01_website_reset_password_tour`)
3680            self.modified(vals)
3681
3682            if self._parent_store and self._parent_name in vals:
3683                self.flush([self._parent_name])
3684
3685            # validate non-inversed fields first
3686            inverse_fields = [f.name for fs in determine_inverses.values() for f in fs]
3687            real_recs._validate_fields(vals, inverse_fields)
3688
3689            for fields in determine_inverses.values():
3690                # write again on non-stored fields that have been invalidated from cache
3691                for field in fields:
3692                    if not field.store and any(self.env.cache.get_missing_ids(real_recs, field)):
3693                        field.write(real_recs, vals[field.name])
3694
3695                # inverse records that are not being computed
3696                try:
3697                    fields[0].determine_inverse(real_recs)
3698                except AccessError as e:
3699                    if fields[0].inherited:
3700                        description = self.env['ir.model']._get(self._name).name
3701                        raise AccessError(
3702                            _("%(previous_message)s\n\nImplicitly accessed through '%(document_kind)s' (%(document_model)s).") % {
3703                                'previous_message': e.args[0],
3704                                'document_kind': description,
3705                                'document_model': self._name,
3706                            }
3707                        )
3708                    raise
3709
3710            # validate inversed fields
3711            real_recs._validate_fields(inverse_fields)
3712
3713        if check_company and self._check_company_auto:
3714            self._check_company()
3715        return True
3716
3717    def _write(self, vals):
3718        # low-level implementation of write()
3719        if not self:
3720            return True
3721
3722        self._check_concurrency()
3723        cr = self._cr
3724
3725        # determine records that require updating parent_path
3726        parent_records = self._parent_store_update_prepare(vals)
3727
3728        # determine SQL values
3729        columns = []                    # list of (column_name, format, value)
3730
3731        for name, val in sorted(vals.items()):
3732            if self._log_access and name in LOG_ACCESS_COLUMNS and not val:
3733                continue
3734            field = self._fields[name]
3735            assert field.store
3736
3737            if field.deprecated:
3738                _logger.warning('Field %s is deprecated: %s', field, field.deprecated)
3739
3740            assert field.column_type
3741            columns.append((name, field.column_format, val))
3742
3743        if self._log_access:
3744            if not vals.get('write_uid'):
3745                columns.append(('write_uid', '%s', self._uid))
3746            if not vals.get('write_date'):
3747                columns.append(('write_date', '%s', AsIs("(now() at time zone 'UTC')")))
3748
3749        # update columns
3750        if columns:
3751            query = 'UPDATE "%s" SET %s WHERE id IN %%s' % (
3752                self._table, ','.join('"%s"=%s' % (column[0], column[1]) for column in columns),
3753            )
3754            params = [column[2] for column in columns]
3755            for sub_ids in cr.split_for_in_conditions(set(self.ids)):
3756                cr.execute(query, params + [sub_ids])
3757                if cr.rowcount != len(sub_ids):
3758                    raise MissingError(
3759                        _('One of the records you are trying to modify has already been deleted (Document type: %s).', self._description)
3760                        + '\n\n({} {}, {} {})'.format(_('Records:'), sub_ids[:6], _('User:'), self._uid)
3761                    )
3762
3763        # update parent_path
3764        if parent_records:
3765            parent_records._parent_store_update()
3766
3767        return True
3768
3769    @api.model_create_multi
3770    @api.returns('self', lambda value: value.id)
3771    def create(self, vals_list):
3772        """ create(vals_list) -> records
3773
3774        Creates new records for the model.
3775
3776        The new records are initialized using the values from the list of dicts
3777        ``vals_list``, and if necessary those from :meth:`~.default_get`.
3778
3779        :param list vals_list:
3780            values for the model's fields, as a list of dictionaries::
3781
3782                [{'field_name': field_value, ...}, ...]
3783
3784            For backward compatibility, ``vals_list`` may be a dictionary.
3785            It is treated as a singleton list ``[vals]``, and a single record
3786            is returned.
3787
3788            see :meth:`~.write` for details
3789
3790        :return: the created records
3791        :raise AccessError: * if user has no create rights on the requested object
3792                            * if user tries to bypass access rules for create on the requested object
3793        :raise ValidationError: if user tries to enter invalid value for a field that is not in selection
3794        :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
3795        """
3796        if not vals_list:
3797            return self.browse()
3798
3799        self = self.browse()
3800        self.check_access_rights('create')
3801
3802        bad_names = {'id', 'parent_path'}
3803        if self._log_access:
3804            # the superuser can set log_access fields while loading registry
3805            if not(self.env.uid == SUPERUSER_ID and not self.pool.ready):
3806                bad_names.update(LOG_ACCESS_COLUMNS)
3807
3808        # classify fields for each record
3809        data_list = []
3810        inversed_fields = set()
3811
3812        for vals in vals_list:
3813            # add missing defaults
3814            vals = self._add_missing_default_values(vals)
3815
3816            # distribute fields into sets for various purposes
3817            data = {}
3818            data['stored'] = stored = {}
3819            data['inversed'] = inversed = {}
3820            data['inherited'] = inherited = defaultdict(dict)
3821            data['protected'] = protected = set()
3822            for key, val in vals.items():
3823                if key in bad_names:
3824                    continue
3825                field = self._fields.get(key)
3826                if not field:
3827                    raise ValueError("Invalid field %r on model %r" % (key, self._name))
3828                if field.company_dependent:
3829                    irprop_def = self.env['ir.property']._get(key, self._name)
3830                    cached_def = field.convert_to_cache(irprop_def, self)
3831                    cached_val = field.convert_to_cache(val, self)
3832                    if cached_val == cached_def:
3833                        # val is the same as the default value defined in
3834                        # 'ir.property'; by design, 'ir.property' will not
3835                        # create entries specific to these records; skipping the
3836                        # field inverse saves 4 SQL queries
3837                        continue
3838                if field.store:
3839                    stored[key] = val
3840                if field.inherited:
3841                    inherited[field.related_field.model_name][key] = val
3842                elif field.inverse:
3843                    inversed[key] = val
3844                    inversed_fields.add(field)
3845                # protect non-readonly computed fields against (re)computation
3846                if field.compute and not field.readonly:
3847                    protected.update(self.pool.field_computed.get(field, [field]))
3848
3849            data_list.append(data)
3850
3851        # create or update parent records
3852        for model_name, parent_name in self._inherits.items():
3853            parent_data_list = []
3854            for data in data_list:
3855                if not data['stored'].get(parent_name):
3856                    parent_data_list.append(data)
3857                elif data['inherited'][model_name]:
3858                    parent = self.env[model_name].browse(data['stored'][parent_name])
3859                    parent.write(data['inherited'][model_name])
3860
3861            if parent_data_list:
3862                parents = self.env[model_name].create([
3863                    data['inherited'][model_name]
3864                    for data in parent_data_list
3865                ])
3866                for parent, data in zip(parents, parent_data_list):
3867                    data['stored'][parent_name] = parent.id
3868
3869        # create records with stored fields
3870        records = self._create(data_list)
3871
3872        # protect fields being written against recomputation
3873        protected = [(data['protected'], data['record']) for data in data_list]
3874        with self.env.protecting(protected):
3875            # group fields by inverse method (to call it once), and order groups
3876            # by dependence (in case they depend on each other)
3877            field_groups = (fields for _inv, fields in groupby(inversed_fields, attrgetter('inverse')))
3878            for fields in field_groups:
3879                # determine which records to inverse for those fields
3880                inv_names = {field.name for field in fields}
3881                rec_vals = [
3882                    (data['record'], {
3883                        name: data['inversed'][name]
3884                        for name in inv_names
3885                        if name in data['inversed']
3886                    })
3887                    for data in data_list
3888                    if not inv_names.isdisjoint(data['inversed'])
3889                ]
3890
3891                # If a field is not stored, its inverse method will probably
3892                # write on its dependencies, which will invalidate the field on
3893                # all records. We therefore inverse the field record by record.
3894                if all(field.store or field.company_dependent for field in fields):
3895                    batches = [rec_vals]
3896                else:
3897                    batches = [[rec_data] for rec_data in rec_vals]
3898
3899                for batch in batches:
3900                    for record, vals in batch:
3901                        record._update_cache(vals)
3902                    batch_recs = self.concat(*(record for record, vals in batch))
3903                    fields[0].determine_inverse(batch_recs)
3904
3905        # check Python constraints for non-stored inversed fields
3906        for data in data_list:
3907            data['record']._validate_fields(data['inversed'], data['stored'])
3908
3909        if self._check_company_auto:
3910            records._check_company()
3911        return records
3912
3913    @api.model
3914    def _create(self, data_list):
3915        """ Create records from the stored field values in ``data_list``. """
3916        assert data_list
3917        cr = self.env.cr
3918        quote = '"{}"'.format
3919
3920        # insert rows
3921        ids = []                        # ids of created records
3922        other_fields = set()            # non-column fields
3923        translated_fields = set()       # translated fields
3924
3925        # column names, formats and values (for common fields)
3926        columns0 = [('id', "nextval(%s)", self._sequence)]
3927        if self._log_access:
3928            columns0.append(('create_uid', "%s", self._uid))
3929            columns0.append(('create_date', "%s", AsIs("(now() at time zone 'UTC')")))
3930            columns0.append(('write_uid', "%s", self._uid))
3931            columns0.append(('write_date', "%s", AsIs("(now() at time zone 'UTC')")))
3932
3933        for data in data_list:
3934            # determine column values
3935            stored = data['stored']
3936            columns = [column for column in columns0 if column[0] not in stored]
3937            for name, val in sorted(stored.items()):
3938                field = self._fields[name]
3939                assert field.store
3940
3941                if field.column_type:
3942                    col_val = field.convert_to_column(val, self, stored)
3943                    columns.append((name, field.column_format, col_val))
3944                    if field.translate is True:
3945                        translated_fields.add(field)
3946                else:
3947                    other_fields.add(field)
3948
3949            # Insert rows one by one
3950            # - as records don't all specify the same columns, code building batch-insert query
3951            #   was very complex
3952            # - and the gains were low, so not worth spending so much complexity
3953            #
3954            # It also seems that we have to be careful with INSERTs in batch, because they have the
3955            # same problem as SELECTs:
3956            # If we inject a lot of data in a single query, we fall into pathological perfs in
3957            # terms of SQL parser and the execution of the query itself.
3958            # In SELECT queries, we inject max 1000 ids (integers) when we can, because we know
3959            # that this limit is well managed by PostgreSQL.
3960            # In INSERT queries, we inject integers (small) and larger data (TEXT blocks for
3961            # example).
3962            #
3963            # The problem then becomes: how to "estimate" the right size of the batch to have
3964            # good performance?
3965            #
3966            # This requires extensive testing, and it was prefered not to introduce INSERTs in
3967            # batch, to avoid regressions as much as possible.
3968            #
3969            # That said, we haven't closed the door completely.
3970            query = "INSERT INTO {} ({}) VALUES ({}) RETURNING id".format(
3971                quote(self._table),
3972                ", ".join(quote(name) for name, fmt, val in columns),
3973                ", ".join(fmt for name, fmt, val in columns),
3974            )
3975            params = [val for name, fmt, val in columns]
3976            cr.execute(query, params)
3977            ids.append(cr.fetchone()[0])
3978
3979        # put the new records in cache, and update inverse fields, for many2one
3980        #
3981        # cachetoclear is an optimization to avoid modified()'s cost until other_fields are processed
3982        cachetoclear = []
3983        records = self.browse(ids)
3984        inverses_update = defaultdict(list)     # {(field, value): ids}
3985        for data, record in zip(data_list, records):
3986            data['record'] = record
3987            # DLE P104: test_inherit.py, test_50_search_one2many
3988            vals = dict({k: v for d in data['inherited'].values() for k, v in d.items()}, **data['stored'])
3989            set_vals = list(vals) + LOG_ACCESS_COLUMNS + [self.CONCURRENCY_CHECK_FIELD, 'id', 'parent_path']
3990            for field in self._fields.values():
3991                if field.type in ('one2many', 'many2many'):
3992                    self.env.cache.set(record, field, ())
3993                elif field.related and not field.column_type:
3994                    self.env.cache.set(record, field, field.convert_to_cache(None, record))
3995                # DLE P123: `test_adv_activity`, `test_message_assignation_inbox`, `test_message_log`, `test_create_mail_simple`, ...
3996                # Set `mail.message.parent_id` to False in cache so it doesn't do the useless SELECT when computing the modified of `child_ids`
3997                # in other words, if `parent_id` is not set, no other message `child_ids` are impacted.
3998                # + avoid the fetch of fields which are False. e.g. if a boolean field is not passed in vals and as no default set in the field attributes,
3999                # then we know it can be set to False in the cache in the case of a create.
4000                elif field.name not in set_vals and not field.compute:
4001                    self.env.cache.set(record, field, field.convert_to_cache(None, record))
4002            for fname, value in vals.items():
4003                field = self._fields[fname]
4004                if field.type in ('one2many', 'many2many'):
4005                    cachetoclear.append((record, field))
4006                else:
4007                    cache_value = field.convert_to_cache(value, record)
4008                    self.env.cache.set(record, field, cache_value)
4009                    if field.type in ('many2one', 'many2one_reference') and record._field_inverses[field]:
4010                        inverses_update[(field, cache_value)].append(record.id)
4011
4012        for (field, value), record_ids in inverses_update.items():
4013            field._update_inverses(self.browse(record_ids), value)
4014
4015        # update parent_path
4016        records._parent_store_create()
4017
4018        # protect fields being written against recomputation
4019        protected = [(data['protected'], data['record']) for data in data_list]
4020        with self.env.protecting(protected):
4021            # mark computed fields as todo
4022            records.modified(self._fields, create=True)
4023
4024            if other_fields:
4025                # discard default values from context for other fields
4026                others = records.with_context(clean_context(self._context))
4027                for field in sorted(other_fields, key=attrgetter('_sequence')):
4028                    field.create([
4029                        (other, data['stored'][field.name])
4030                        for other, data in zip(others, data_list)
4031                        if field.name in data['stored']
4032                    ])
4033
4034                # mark fields to recompute
4035                records.modified([field.name for field in other_fields], create=True)
4036
4037            # if value in cache has not been updated by other_fields, remove it
4038            for record, field in cachetoclear:
4039                if self.env.cache.contains(record, field) and not self.env.cache.get(record, field):
4040                    self.env.cache.remove(record, field)
4041
4042        # check Python constraints for stored fields
4043        records._validate_fields(name for data in data_list for name in data['stored'])
4044        records.check_access_rule('create')
4045
4046        # add translations
4047        if self.env.lang and self.env.lang != 'en_US':
4048            Translations = self.env['ir.translation']
4049            for field in translated_fields:
4050                tname = "%s,%s" % (field.model_name, field.name)
4051                for data in data_list:
4052                    if field.name in data['stored']:
4053                        record = data['record']
4054                        val = data['stored'][field.name]
4055                        Translations._set_ids(tname, 'model', self.env.lang, record.ids, val, val)
4056
4057        return records
4058
4059    def _compute_field_value(self, field):
4060        # This is for base automation, to have something to override to catch
4061        # the changes of values for stored compute fields.
4062        if isinstance(field.compute, str):
4063            getattr(self, field.compute)()
4064        else:
4065            field.compute(self)
4066
4067        if field.store and any(self._ids):
4068            # check constraints of the fields that have been computed
4069            fnames = [f.name for f in self.pool.field_computed[field]]
4070            self.filtered('id')._validate_fields(fnames)
4071
4072    def _parent_store_create(self):
4073        """ Set the parent_path field on ``self`` after its creation. """
4074        if not self._parent_store:
4075            return
4076
4077        query = """
4078            UPDATE {0} node
4079            SET parent_path=concat((SELECT parent.parent_path FROM {0} parent
4080                                    WHERE parent.id=node.{1}), node.id, '/')
4081            WHERE node.id IN %s
4082        """.format(self._table, self._parent_name)
4083        self._cr.execute(query, [tuple(self.ids)])
4084
4085    def _parent_store_update_prepare(self, vals):
4086        """ Return the records in ``self`` that must update their parent_path
4087            field. This must be called before updating the parent field.
4088        """
4089        if not self._parent_store or self._parent_name not in vals:
4090            return self.browse()
4091
4092        # No need to recompute the values if the parent is the same.
4093        parent_val = vals[self._parent_name]
4094        if parent_val:
4095            query = """ SELECT id FROM {0}
4096                        WHERE id IN %s AND ({1} != %s OR {1} IS NULL) """
4097            params = [tuple(self.ids), parent_val]
4098        else:
4099            query = """ SELECT id FROM {0}
4100                        WHERE id IN %s AND {1} IS NOT NULL """
4101            params = [tuple(self.ids)]
4102        query = query.format(self._table, self._parent_name)
4103        self._cr.execute(query, params)
4104        return self.browse([row[0] for row in self._cr.fetchall()])
4105
4106    def _parent_store_update(self):
4107        """ Update the parent_path field of ``self``. """
4108        cr = self.env.cr
4109
4110        # determine new prefix of parent_path
4111        query = """
4112            SELECT parent.parent_path FROM {0} node, {0} parent
4113            WHERE node.id = %s AND parent.id = node.{1}
4114        """
4115        cr.execute(query.format(self._table, self._parent_name), [self.ids[0]])
4116        prefix = cr.fetchone()[0] if cr.rowcount else ''
4117
4118        # check for recursion
4119        if prefix:
4120            parent_ids = {int(label) for label in prefix.split('/')[:-1]}
4121            if not parent_ids.isdisjoint(self._ids):
4122                raise UserError(_("Recursion Detected."))
4123
4124        # update parent_path of all records and their descendants
4125        query = """
4126            UPDATE {0} child
4127            SET parent_path = concat(%s, substr(child.parent_path,
4128                    length(node.parent_path) - length(node.id || '/') + 1))
4129            FROM {0} node
4130            WHERE node.id IN %s
4131            AND child.parent_path LIKE concat(node.parent_path, '%%')
4132            RETURNING child.id
4133        """
4134        cr.execute(query.format(self._table), [prefix, tuple(self.ids)])
4135        modified_ids = {row[0] for row in cr.fetchall()}
4136        self.browse(modified_ids).modified(['parent_path'])
4137
4138    def _load_records_write(self, values):
4139        self.write(values)
4140
4141    def _load_records_create(self, values):
4142        return self.create(values)
4143
4144    def _load_records(self, data_list, update=False):
4145        """ Create or update records of this model, and assign XMLIDs.
4146
4147            :param data_list: list of dicts with keys `xml_id` (XMLID to
4148                assign), `noupdate` (flag on XMLID), `values` (field values)
4149            :param update: should be ``True`` when upgrading a module
4150
4151            :return: the records corresponding to ``data_list``
4152        """
4153        original_self = self.browse()
4154        # records created during installation should not display messages
4155        self = self.with_context(install_mode=True)
4156        imd = self.env['ir.model.data'].sudo()
4157
4158        # The algorithm below partitions 'data_list' into three sets: the ones
4159        # to create, the ones to update, and the others. For each set, we assign
4160        # data['record'] for each data. All those records are then retrieved for
4161        # the result.
4162
4163        # determine existing xml_ids
4164        xml_ids = [data['xml_id'] for data in data_list if data.get('xml_id')]
4165        existing = {
4166            ("%s.%s" % row[1:3]): row
4167            for row in imd._lookup_xmlids(xml_ids, self)
4168        }
4169
4170        # determine which records to create and update
4171        to_create = []                  # list of data
4172        to_update = []                  # list of data
4173        imd_data_list = []              # list of data for _update_xmlids()
4174
4175        for data in data_list:
4176            xml_id = data.get('xml_id')
4177            if not xml_id:
4178                vals = data['values']
4179                if vals.get('id'):
4180                    data['record'] = self.browse(vals['id'])
4181                    to_update.append(data)
4182                elif not update:
4183                    to_create.append(data)
4184                continue
4185            row = existing.get(xml_id)
4186            if not row:
4187                to_create.append(data)
4188                continue
4189            d_id, d_module, d_name, d_model, d_res_id, d_noupdate, r_id = row
4190            record = self.browse(d_res_id)
4191            if r_id:
4192                data['record'] = record
4193                imd_data_list.append(data)
4194                if not (update and d_noupdate):
4195                    to_update.append(data)
4196            else:
4197                imd.browse(d_id).unlink()
4198                to_create.append(data)
4199
4200        # update existing records
4201        for data in to_update:
4202            data['record']._load_records_write(data['values'])
4203
4204        # check for records to create with an XMLID from another module
4205        module = self.env.context.get('install_module')
4206        if module:
4207            prefix = module + "."
4208            for data in to_create:
4209                if data.get('xml_id') and not data['xml_id'].startswith(prefix):
4210                    _logger.warning("Creating record %s in module %s.", data['xml_id'], module)
4211
4212        # create records
4213        records = self._load_records_create([data['values'] for data in to_create])
4214        for data, record in zip(to_create, records):
4215            data['record'] = record
4216            if data.get('xml_id'):
4217                # add XML ids for parent records that have just been created
4218                for parent_model, parent_field in self._inherits.items():
4219                    if not data['values'].get(parent_field):
4220                        imd_data_list.append({
4221                            'xml_id': f"{data['xml_id']}_{parent_model.replace('.', '_')}",
4222                            'record': record[parent_field],
4223                            'noupdate': data.get('noupdate', False),
4224                        })
4225                imd_data_list.append(data)
4226
4227        # create or update XMLIDs
4228        imd._update_xmlids(imd_data_list, update)
4229
4230        return original_self.concat(*(data['record'] for data in data_list))
4231
4232    # TODO: ameliorer avec NULL
4233    @api.model
4234    def _where_calc(self, domain, active_test=True):
4235        """Computes the WHERE clause needed to implement an OpenERP domain.
4236        :param domain: the domain to compute
4237        :type domain: list
4238        :param active_test: whether the default filtering of records with ``active``
4239                            field set to ``False`` should be applied.
4240        :return: the query expressing the given domain as provided in domain
4241        :rtype: osv.query.Query
4242        """
4243        # if the object has an active field ('active', 'x_active'), filter out all
4244        # inactive records unless they were explicitely asked for
4245        if self._active_name and active_test and self._context.get('active_test', True):
4246            # the item[0] trick below works for domain items and '&'/'|'/'!'
4247            # operators too
4248            if not any(item[0] == self._active_name for item in domain):
4249                domain = [(self._active_name, '=', 1)] + domain
4250
4251        if domain:
4252            return expression.expression(domain, self).query
4253        else:
4254            return Query(self.env.cr, self._table, self._table_query)
4255
4256    def _check_qorder(self, word):
4257        if not regex_order.match(word):
4258            raise UserError(_(
4259                'Invalid "order" specified (%s). A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)',
4260                word,
4261            ))
4262        return True
4263
4264    @api.model
4265    def _apply_ir_rules(self, query, mode='read'):
4266        """Add what's missing in ``query`` to implement all appropriate ir.rules
4267          (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4268
4269           :param query: the current query object
4270        """
4271        if self.env.su:
4272            return
4273
4274        # apply main rules on the object
4275        Rule = self.env['ir.rule']
4276        domain = Rule._compute_domain(self._name, mode)
4277        if domain:
4278            expression.expression(domain, self.sudo(), self._table, query)
4279
4280        # apply ir.rules from the parents (through _inherits)
4281        for parent_model_name in self._inherits:
4282            domain = Rule._compute_domain(parent_model_name, mode)
4283            if domain:
4284                parent_model = self.env[parent_model_name]
4285                parent_alias = self._inherits_join_add(self, parent_model_name, query)
4286                expression.expression(domain, parent_model.sudo(), parent_alias, query)
4287
4288    @api.model
4289    def _generate_translated_field(self, table_alias, field, query):
4290        """
4291        Add possibly missing JOIN with translations table to ``query`` and
4292        generate the expression for the translated field.
4293
4294        :return: the qualified field name (or expression) to use for ``field``
4295        """
4296        if self.env.lang:
4297            alias = query.left_join(
4298                table_alias, 'id', 'ir_translation', 'res_id', field,
4299                extra='"{rhs}"."type" = \'model\' AND "{rhs}"."name" = %s AND "{rhs}"."lang" = %s AND "{rhs}"."value" != %s',
4300                extra_params=["%s,%s" % (self._name, field), self.env.lang, ""],
4301            )
4302            return 'COALESCE("%s"."%s", "%s"."%s")' % (alias, 'value', table_alias, field)
4303        else:
4304            return '"%s"."%s"' % (table_alias, field)
4305
4306    @api.model
4307    def _generate_m2o_order_by(self, alias, order_field, query, reverse_direction, seen):
4308        """
4309        Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4310        either native m2o fields or function/related fields that are stored, including
4311        intermediate JOINs for inheritance if required.
4312
4313        :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4314        """
4315        field = self._fields[order_field]
4316        if field.inherited:
4317            # also add missing joins for reaching the table containing the m2o field
4318            qualified_field = self._inherits_join_calc(alias, order_field, query)
4319            alias, order_field = qualified_field.replace('"', '').split('.', 1)
4320            field = field.base_field
4321
4322        assert field.type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4323        if not field.store:
4324            _logger.debug("Many2one function/related fields must be stored "
4325                          "to be used as ordering fields! Ignoring sorting for %s.%s",
4326                          self._name, order_field)
4327            return []
4328
4329        # figure out the applicable order_by for the m2o
4330        dest_model = self.env[field.comodel_name]
4331        m2o_order = dest_model._order
4332        if not regex_order.match(m2o_order):
4333            # _order is complex, can't use it here, so we default to _rec_name
4334            m2o_order = dest_model._rec_name
4335
4336        # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4337        # as we don't want to exclude results that have NULL values for the m2o
4338        dest_alias = query.left_join(alias, order_field, dest_model._table, 'id', order_field)
4339        return dest_model._generate_order_by_inner(dest_alias, m2o_order, query,
4340                                                   reverse_direction, seen)
4341
4342    @api.model
4343    def _generate_order_by_inner(self, alias, order_spec, query, reverse_direction=False, seen=None):
4344        if seen is None:
4345            seen = set()
4346        self._check_qorder(order_spec)
4347
4348        order_by_elements = []
4349        for order_part in order_spec.split(','):
4350            order_split = order_part.strip().split(' ')
4351            order_field = order_split[0].strip()
4352            order_direction = order_split[1].strip().upper() if len(order_split) == 2 else ''
4353            if reverse_direction:
4354                order_direction = 'ASC' if order_direction == 'DESC' else 'DESC'
4355            do_reverse = order_direction == 'DESC'
4356
4357            field = self._fields.get(order_field)
4358            if not field:
4359                raise ValueError("Invalid field %r on model %r" % (order_field, self._name))
4360
4361            if order_field == 'id':
4362                order_by_elements.append('"%s"."%s" %s' % (alias, order_field, order_direction))
4363            else:
4364                if field.inherited:
4365                    field = field.base_field
4366                if field.store and field.type == 'many2one':
4367                    key = (field.model_name, field.comodel_name, order_field)
4368                    if key not in seen:
4369                        seen.add(key)
4370                        order_by_elements += self._generate_m2o_order_by(alias, order_field, query, do_reverse, seen)
4371                elif field.store and field.column_type:
4372                    qualifield_name = self._inherits_join_calc(alias, order_field, query)
4373                    if field.type == 'boolean':
4374                        qualifield_name = "COALESCE(%s, false)" % qualifield_name
4375                    order_by_elements.append("%s %s" % (qualifield_name, order_direction))
4376                else:
4377                    _logger.warning("Model %r cannot be sorted on field %r (not a column)", self._name, order_field)
4378                    continue  # ignore non-readable or "non-joinable" fields
4379
4380        return order_by_elements
4381
4382    @api.model
4383    def _generate_order_by(self, order_spec, query):
4384        """
4385        Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4386        a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4387
4388        :raise ValueError in case order_spec is malformed
4389        """
4390        order_by_clause = ''
4391        order_spec = order_spec or self._order
4392        if order_spec:
4393            order_by_elements = self._generate_order_by_inner(self._table, order_spec, query)
4394            if order_by_elements:
4395                order_by_clause = ",".join(order_by_elements)
4396
4397        return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4398
4399    @api.model
4400    def _flush_search(self, domain, fields=None, order=None, seen=None):
4401        """ Flush all the fields appearing in `domain`, `fields` and `order`. """
4402        if seen is None:
4403            seen = set()
4404        elif self._name in seen:
4405            return
4406        seen.add(self._name)
4407
4408        to_flush = defaultdict(set)             # {model_name: field_names}
4409        if fields:
4410            to_flush[self._name].update(fields)
4411        # also take into account the fields in the record rules
4412        domain = list(domain) + (self.env['ir.rule']._compute_domain(self._name, 'read') or [])
4413        for arg in domain:
4414            if isinstance(arg, str):
4415                continue
4416            if not isinstance(arg[0], str):
4417                continue
4418            model_name = self._name
4419            for fname in arg[0].split('.'):
4420                field = self.env[model_name]._fields.get(fname)
4421                if not field:
4422                    break
4423                to_flush[model_name].add(fname)
4424                # DLE P111: `test_message_process_email_partner_find`
4425                # Search on res.users with email_normalized in domain
4426                # must trigger the recompute and flush of res.partner.email_normalized
4427                if field.related_field:
4428                    model = self
4429                    # DLE P129: `test_transit_multi_companies`
4430                    # `self.env['stock.picking'].search([('product_id', '=', product.id)])`
4431                    # Should flush `stock.move.picking_ids` as `product_id` on `stock.picking` is defined as:
4432                    # `product_id = fields.Many2one('product.product', 'Product', related='move_lines.product_id', readonly=False)`
4433                    for f in field.related:
4434                        rfield = model._fields.get(f)
4435                        if rfield:
4436                            to_flush[model._name].add(f)
4437                            if rfield.type in ('many2one', 'one2many', 'many2many'):
4438                                model = self.env[rfield.comodel_name]
4439                                if rfield.type == 'one2many' and rfield.inverse_name:
4440                                    to_flush[rfield.comodel_name].add(rfield.inverse_name)
4441                if field.comodel_name:
4442                    model_name = field.comodel_name
4443            # hierarchy operators need the parent field
4444            if arg[1] in ('child_of', 'parent_of'):
4445                model = self.env[model_name]
4446                if model._parent_store:
4447                    to_flush[model_name].add(model._parent_name)
4448
4449        # flush the order fields
4450        order_spec = order or self._order
4451        for order_part in order_spec.split(','):
4452            order_field = order_part.split()[0]
4453            field = self._fields.get(order_field)
4454            if field is not None:
4455                to_flush[self._name].add(order_field)
4456                if field.relational:
4457                    self.env[field.comodel_name]._flush_search([], seen=seen)
4458
4459        if self._active_name:
4460            to_flush[self._name].add(self._active_name)
4461
4462        # flush model dependencies (recursively)
4463        if self._depends:
4464            models = [self]
4465            while models:
4466                model = models.pop()
4467                for model_name, field_names in model._depends.items():
4468                    to_flush[model_name].update(field_names)
4469                    models.append(self.env[model_name])
4470
4471        for model_name, field_names in to_flush.items():
4472            self.env[model_name].flush(field_names)
4473
4474    @api.model
4475    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
4476        """
4477        Private implementation of search() method, allowing specifying the uid to use for the access right check.
4478        This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4479        by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4480        This is ok at the security level because this method is private and not callable through XML-RPC.
4481
4482        :param access_rights_uid: optional user ID to use when checking access rights
4483                                  (not for ir.rules, this is only for ir.model.access)
4484        :return: a list of record ids or an integer (if count is True)
4485        """
4486        model = self.with_user(access_rights_uid) if access_rights_uid else self
4487        model.check_access_rights('read')
4488
4489        if expression.is_false(self, args):
4490            # optimization: no need to query, as no record satisfies the domain
4491            return 0 if count else []
4492
4493        # the flush must be done before the _where_calc(), as the latter can do some selects
4494        self._flush_search(args, order=order)
4495
4496        query = self._where_calc(args)
4497        self._apply_ir_rules(query, 'read')
4498
4499        if count:
4500            # Ignore order, limit and offset when just counting, they don't make sense and could
4501            # hurt performance
4502            query_str, params = query.select("count(1)")
4503            self._cr.execute(query_str, params)
4504            res = self._cr.fetchone()
4505            return res[0]
4506
4507        query.order = self._generate_order_by(order, query).replace('ORDER BY ', '')
4508        query.limit = limit
4509        query.offset = offset
4510
4511        return query
4512
4513    @api.returns(None, lambda value: value[0])
4514    def copy_data(self, default=None):
4515        """
4516        Copy given record's data with all its fields values
4517
4518        :param default: field values to override in the original values of the copied record
4519        :return: list with a dictionary containing all the field values
4520        """
4521        # In the old API, this method took a single id and return a dict. When
4522        # invoked with the new API, it returned a list of dicts.
4523        self.ensure_one()
4524
4525        # avoid recursion through already copied records in case of circular relationship
4526        if '__copy_data_seen' not in self._context:
4527            self = self.with_context(__copy_data_seen=defaultdict(set))
4528        seen_map = self._context['__copy_data_seen']
4529        if self.id in seen_map[self._name]:
4530            return
4531        seen_map[self._name].add(self.id)
4532
4533        default = dict(default or [])
4534
4535        # build a black list of fields that should not be copied
4536        blacklist = set(MAGIC_COLUMNS + ['parent_path'])
4537        whitelist = set(name for name, field in self._fields.items() if not field.inherited)
4538
4539        def blacklist_given_fields(model):
4540            # blacklist the fields that are given by inheritance
4541            for parent_model, parent_field in model._inherits.items():
4542                blacklist.add(parent_field)
4543                if parent_field in default:
4544                    # all the fields of 'parent_model' are given by the record:
4545                    # default[parent_field], except the ones redefined in self
4546                    blacklist.update(set(self.env[parent_model]._fields) - whitelist)
4547                else:
4548                    blacklist_given_fields(self.env[parent_model])
4549            # blacklist deprecated fields
4550            for name, field in model._fields.items():
4551                if field.deprecated:
4552                    blacklist.add(name)
4553
4554        blacklist_given_fields(self)
4555
4556        fields_to_copy = {name: field
4557                          for name, field in self._fields.items()
4558                          if field.copy and name not in default and name not in blacklist}
4559
4560        for name, field in fields_to_copy.items():
4561            if field.type == 'one2many':
4562                # duplicate following the order of the ids because we'll rely on
4563                # it later for copying translations in copy_translation()!
4564                lines = [rec.copy_data()[0] for rec in self[name].sorted(key='id')]
4565                # the lines are duplicated using the wrong (old) parent, but then
4566                # are reassigned to the correct one thanks to the (0, 0, ...)
4567                default[name] = [(0, 0, line) for line in lines if line]
4568            elif field.type == 'many2many':
4569                default[name] = [(6, 0, self[name].ids)]
4570            else:
4571                default[name] = field.convert_to_write(self[name], self)
4572
4573        return [default]
4574
4575    def copy_translations(old, new, excluded=()):
4576        """ Recursively copy the translations from original to new record
4577
4578        :param old: the original record
4579        :param new: the new record (copy of the original one)
4580        :param excluded: a container of user-provided field names
4581        """
4582        # avoid recursion through already copied records in case of circular relationship
4583        if '__copy_translations_seen' not in old._context:
4584            old = old.with_context(__copy_translations_seen=defaultdict(set))
4585        seen_map = old._context['__copy_translations_seen']
4586        if old.id in seen_map[old._name]:
4587            return
4588        seen_map[old._name].add(old.id)
4589
4590        def get_trans(field, old, new):
4591            """ Return the 'name' of the translations to search for, together
4592                with the record ids corresponding to ``old`` and ``new``.
4593            """
4594            if field.inherited:
4595                pname = field.related[0]
4596                return get_trans(field.related_field, old[pname], new[pname])
4597            return "%s,%s" % (field.model_name, field.name), old.id, new.id
4598
4599        # removing the lang to compare untranslated values
4600        old_wo_lang, new_wo_lang = (old + new).with_context(lang=None)
4601        Translation = old.env['ir.translation']
4602
4603        for name, field in old._fields.items():
4604            if not field.copy:
4605                continue
4606
4607            if field.inherited and field.related[0] in excluded:
4608                # inherited fields that come from a user-provided parent record
4609                # must not copy translations, as the parent record is not a copy
4610                # of the old parent record
4611                continue
4612
4613            if field.type == 'one2many' and field.name not in excluded:
4614                # we must recursively copy the translations for o2m; here we
4615                # rely on the order of the ids to match the translations as
4616                # foreseen in copy_data()
4617                old_lines = old[name].sorted(key='id')
4618                new_lines = new[name].sorted(key='id')
4619                for (old_line, new_line) in zip(old_lines, new_lines):
4620                    # don't pass excluded as it is not about those lines
4621                    old_line.copy_translations(new_line)
4622
4623            elif field.translate:
4624                # for translatable fields we copy their translations
4625                trans_name, source_id, target_id = get_trans(field, old, new)
4626                domain = [('name', '=', trans_name), ('res_id', '=', source_id)]
4627                new_val = new_wo_lang[name]
4628                if old.env.lang and callable(field.translate):
4629                    # the new value *without lang* must be the old value without lang
4630                    new_wo_lang[name] = old_wo_lang[name]
4631                vals_list = []
4632                for vals in Translation.search_read(domain):
4633                    del vals['id']
4634                    del vals['module']      # duplicated vals is not linked to any module
4635                    vals['res_id'] = target_id
4636                    if not callable(field.translate):
4637                        vals['src'] = new_wo_lang[name]
4638                    if vals['lang'] == old.env.lang and field.translate is True:
4639                        # update master record if the new_val was not changed by copy override
4640                        if new_val == old[name]:
4641                            new_wo_lang[name] = old_wo_lang[name]
4642                            vals['src'] = old_wo_lang[name]
4643                        # the value should be the new value (given by copy())
4644                        vals['value'] = new_val
4645                    vals_list.append(vals)
4646                Translation._upsert_translations(vals_list)
4647
4648    @api.returns('self', lambda value: value.id)
4649    def copy(self, default=None):
4650        """ copy(default=None)
4651
4652        Duplicate record ``self`` updating it with default values
4653
4654        :param dict default: dictionary of field values to override in the
4655               original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
4656        :returns: new record
4657
4658        """
4659        self.ensure_one()
4660        vals = self.with_context(active_test=False).copy_data(default)[0]
4661        # To avoid to create a translation in the lang of the user, copy_translation will do it
4662        new = self.with_context(lang=None).create(vals)
4663        self.with_context(from_copy_translation=True).copy_translations(new, excluded=default or ())
4664        return new
4665
4666    @api.returns('self')
4667    def exists(self):
4668        """  exists() -> records
4669
4670        Returns the subset of records in ``self`` that exist, and marks deleted
4671        records as such in cache. It can be used as a test on records::
4672
4673            if record.exists():
4674                ...
4675
4676        By convention, new records are returned as existing.
4677        """
4678        ids, new_ids = [], []
4679        for i in self._ids:
4680            (new_ids if isinstance(i, NewId) else ids).append(i)
4681        if not ids:
4682            return self
4683        query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4684        self._cr.execute(query, [tuple(ids)])
4685        valid_ids = set([r[0] for r in self._cr.fetchall()] + new_ids)
4686        return self.browse(i for i in self._ids if i in valid_ids)
4687
4688    def _check_recursion(self, parent=None):
4689        """
4690        Verifies that there is no loop in a hierarchical structure of records,
4691        by following the parent relationship using the **parent** field until a
4692        loop is detected or until a top-level record is found.
4693
4694        :param parent: optional parent field name (default: ``self._parent_name``)
4695        :return: **True** if no loop was found, **False** otherwise.
4696        """
4697        if not parent:
4698            parent = self._parent_name
4699
4700        # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4701        cr = self._cr
4702        self.flush([parent])
4703        query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4704        for id in self.ids:
4705            current_id = id
4706            while current_id:
4707                cr.execute(query, (current_id,))
4708                result = cr.fetchone()
4709                current_id = result[0] if result else None
4710                if current_id == id:
4711                    return False
4712        return True
4713
4714    def _check_m2m_recursion(self, field_name):
4715        """
4716        Verifies that there is no loop in a directed graph of records, by
4717        following a many2many relationship with the given field name.
4718
4719        :param field_name: field to check
4720        :return: **True** if no loop was found, **False** otherwise.
4721        """
4722        field = self._fields.get(field_name)
4723        if not (field and field.type == 'many2many' and
4724                field.comodel_name == self._name and field.store):
4725            # field must be a many2many on itself
4726            raise ValueError('invalid field_name: %r' % (field_name,))
4727
4728        self.flush([field_name])
4729
4730        cr = self._cr
4731        query = 'SELECT "%s", "%s" FROM "%s" WHERE "%s" IN %%s AND "%s" IS NOT NULL' % \
4732                    (field.column1, field.column2, field.relation, field.column1, field.column2)
4733
4734        succs = defaultdict(set)        # transitive closure of successors
4735        preds = defaultdict(set)        # transitive closure of predecessors
4736        todo, done = set(self.ids), set()
4737        while todo:
4738            # retrieve the respective successors of the nodes in 'todo'
4739            cr.execute(query, [tuple(todo)])
4740            done.update(todo)
4741            todo.clear()
4742            for id1, id2 in cr.fetchall():
4743                # connect id1 and its predecessors to id2 and its successors
4744                for x, y in itertools.product([id1] + list(preds[id1]),
4745                                              [id2] + list(succs[id2])):
4746                    if x == y:
4747                        return False    # we found a cycle here!
4748                    succs[x].add(y)
4749                    preds[y].add(x)
4750                if id2 not in done:
4751                    todo.add(id2)
4752        return True
4753
4754    def _get_external_ids(self):
4755        """Retrieve the External ID(s) of any database record.
4756
4757        **Synopsis**: ``_get_external_ids() -> { 'id': ['module.external_id'] }``
4758
4759        :return: map of ids to the list of their fully qualified External IDs
4760                 in the form ``module.key``, or an empty list when there's no External
4761                 ID for a record, e.g.::
4762
4763                     { 'id': ['module.ext_id', 'module.ext_id_bis'],
4764                       'id2': [] }
4765        """
4766        result = {record.id: [] for record in self}
4767        domain = [('model', '=', self._name), ('res_id', 'in', self.ids)]
4768        for data in self.env['ir.model.data'].sudo().search_read(domain, ['module', 'name', 'res_id'], order='id'):
4769            result[data['res_id']].append('%(module)s.%(name)s' % data)
4770        return result
4771
4772    def get_external_id(self):
4773        """Retrieve the External ID of any database record, if there
4774        is one. This method works as a possible implementation
4775        for a function field, to be able to add it to any
4776        model object easily, referencing it as ``Model.get_external_id``.
4777
4778        When multiple External IDs exist for a record, only one
4779        of them is returned (randomly).
4780
4781        :return: map of ids to their fully qualified XML ID,
4782                 defaulting to an empty string when there's none
4783                 (to be usable as a function field),
4784                 e.g.::
4785
4786                     { 'id': 'module.ext_id',
4787                       'id2': '' }
4788        """
4789        results = self._get_external_ids()
4790        return {key: val[0] if val else ''
4791                for key, val in results.items()}
4792
4793    # backwards compatibility
4794    get_xml_id = get_external_id
4795    _get_xml_ids = _get_external_ids
4796
4797    # Transience
4798    @classmethod
4799    def is_transient(cls):
4800        """ Return whether the model is transient.
4801
4802        See :class:`TransientModel`.
4803
4804        """
4805        return cls._transient
4806
4807    @api.model
4808    def search_read(self, domain=None, fields=None, offset=0, limit=None, order=None):
4809        """Perform a :meth:`search` followed by a :meth:`read`.
4810
4811        :param domain: Search domain, see ``args`` parameter in :meth:`search`.
4812            Defaults to an empty domain that will match all records.
4813        :param fields: List of fields to read, see ``fields`` parameter in :meth:`read`.
4814            Defaults to all fields.
4815        :param int offset: Number of records to skip, see ``offset`` parameter in :meth:`search`.
4816            Defaults to 0.
4817        :param int limit: Maximum number of records to return, see ``limit`` parameter in :meth:`search`.
4818            Defaults to no limit.
4819        :param order: Columns to sort result, see ``order`` parameter in :meth:`search`.
4820            Defaults to no sort.
4821        :return: List of dictionaries containing the asked fields.
4822        :rtype: list(dict).
4823        """
4824        records = self.search(domain or [], offset=offset, limit=limit, order=order)
4825        if not records:
4826            return []
4827
4828        if fields and fields == ['id']:
4829            # shortcut read if we only want the ids
4830            return [{'id': record.id} for record in records]
4831
4832        # read() ignores active_test, but it would forward it to any downstream search call
4833        # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
4834        # was presumably only meant for the main search().
4835        # TODO: Move this to read() directly?
4836        if 'active_test' in self._context:
4837            context = dict(self._context)
4838            del context['active_test']
4839            records = records.with_context(context)
4840
4841        result = records.read(fields)
4842        if len(result) <= 1:
4843            return result
4844
4845        # reorder read
4846        index = {vals['id']: vals for vals in result}
4847        return [index[record.id] for record in records if record.id in index]
4848
4849    def toggle_active(self):
4850        """ Inverse the value of the field ``(x_)active`` on the records in ``self``. """
4851        active_recs = self.filtered(self._active_name)
4852        active_recs[self._active_name] = False
4853        (self - active_recs)[self._active_name] = True
4854
4855    def action_archive(self):
4856        """ Set (x_)active=False on a recordset, by calling toggle_active to
4857            take the corresponding actions according to the model
4858        """
4859        return self.filtered(lambda record: record[self._active_name]).toggle_active()
4860
4861    def action_unarchive(self):
4862        """ Set (x_)active=True on a recordset, by calling toggle_active to
4863            take the corresponding actions according to the model
4864        """
4865        return self.filtered(lambda record: not record[self._active_name]).toggle_active()
4866
4867    def _register_hook(self):
4868        """ stuff to do right after the registry is built """
4869        pass
4870
4871    def _unregister_hook(self):
4872        """ Clean up what `~._register_hook` has done. """
4873        pass
4874
4875    @classmethod
4876    def _patch_method(cls, name, method):
4877        """ Monkey-patch a method for all instances of this model. This replaces
4878            the method called ``name`` by ``method`` in the given class.
4879            The original method is then accessible via ``method.origin``, and it
4880            can be restored with :meth:`~._revert_method`.
4881
4882            Example::
4883
4884                def do_write(self, values):
4885                    # do stuff, and call the original method
4886                    return do_write.origin(self, values)
4887
4888                # patch method write of model
4889                model._patch_method('write', do_write)
4890
4891                # this will call do_write
4892                records = model.search([...])
4893                records.write(...)
4894
4895                # restore the original method
4896                model._revert_method('write')
4897        """
4898        origin = getattr(cls, name)
4899        method.origin = origin
4900        # propagate decorators from origin to method, and apply api decorator
4901        wrapped = api.propagate(origin, method)
4902        wrapped.origin = origin
4903        setattr(cls, name, wrapped)
4904
4905    @classmethod
4906    def _revert_method(cls, name):
4907        """ Revert the original method called ``name`` in the given class.
4908            See :meth:`~._patch_method`.
4909        """
4910        method = getattr(cls, name)
4911        setattr(cls, name, method.origin)
4912
4913    #
4914    # Instance creation
4915    #
4916    # An instance represents an ordered collection of records in a given
4917    # execution environment. The instance object refers to the environment, and
4918    # the records themselves are represented by their cache dictionary. The 'id'
4919    # of each record is found in its corresponding cache dictionary.
4920    #
4921    # This design has the following advantages:
4922    #  - cache access is direct and thus fast;
4923    #  - one can consider records without an 'id' (see new records);
4924    #  - the global cache is only an index to "resolve" a record 'id'.
4925    #
4926
4927    @classmethod
4928    def _browse(cls, env, ids, prefetch_ids):
4929        """ Create a recordset instance.
4930
4931        :param env: an environment
4932        :param ids: a tuple of record ids
4933        :param prefetch_ids: a collection of record ids (for prefetching)
4934        """
4935        records = object.__new__(cls)
4936        records.env = env
4937        records._ids = ids
4938        records._prefetch_ids = prefetch_ids
4939        return records
4940
4941    def browse(self, ids=None):
4942        """ browse([ids]) -> records
4943
4944        Returns a recordset for the ids provided as parameter in the current
4945        environment.
4946
4947        .. code-block:: python
4948
4949            self.browse([7, 18, 12])
4950            res.partner(7, 18, 12)
4951
4952        :param ids: id(s)
4953        :type ids: int or list(int) or None
4954        :return: recordset
4955        """
4956        if not ids:
4957            ids = ()
4958        elif ids.__class__ in IdType:
4959            ids = (ids,)
4960        else:
4961            ids = tuple(ids)
4962        return self._browse(self.env, ids, ids)
4963
4964    #
4965    # Internal properties, for manipulating the instance's implementation
4966    #
4967
4968    @property
4969    def ids(self):
4970        """ Return the list of actual record ids corresponding to ``self``. """
4971        return list(origin_ids(self._ids))
4972
4973    # backward-compatibility with former browse records
4974    _cr = property(lambda self: self.env.cr)
4975    _uid = property(lambda self: self.env.uid)
4976    _context = property(lambda self: self.env.context)
4977
4978    #
4979    # Conversion methods
4980    #
4981
4982    def ensure_one(self):
4983        """Verify that the current recorset holds a single record.
4984
4985        :raise odoo.exceptions.ValueError: ``len(self) != 1``
4986        """
4987        try:
4988            # unpack to ensure there is only one value is faster than len when true and
4989            # has a significant impact as this check is largely called
4990            _id, = self._ids
4991            return self
4992        except ValueError:
4993            raise ValueError("Expected singleton: %s" % self)
4994
4995    def with_env(self, env):
4996        """Return a new version of this recordset attached to the provided environment.
4997
4998        :param env:
4999        :type env: :class:`~odoo.api.Environment`
5000
5001        .. warning::
5002            The new environment will not benefit from the current
5003            environment's data cache, so later data access may incur extra
5004            delays while re-fetching from the database.
5005            The returned recordset has the same prefetch object as ``self``.
5006        """
5007        return self._browse(env, self._ids, self._prefetch_ids)
5008
5009    def sudo(self, flag=True):
5010        """ sudo([flag=True])
5011
5012        Returns a new version of this recordset with superuser mode enabled or
5013        disabled, depending on `flag`. The superuser mode does not change the
5014        current user, and simply bypasses access rights checks.
5015
5016        .. warning::
5017
5018            Using ``sudo`` could cause data access to cross the
5019            boundaries of record rules, possibly mixing records that
5020            are meant to be isolated (e.g. records from different
5021            companies in multi-company environments).
5022
5023            It may lead to un-intuitive results in methods which select one
5024            record among many - for example getting the default company, or
5025            selecting a Bill of Materials.
5026
5027        .. note::
5028
5029            Because the record rules and access control will have to be
5030            re-evaluated, the new recordset will not benefit from the current
5031            environment's data cache, so later data access may incur extra
5032            delays while re-fetching from the database.
5033            The returned recordset has the same prefetch object as ``self``.
5034
5035        """
5036        if not isinstance(flag, bool):
5037            _logger.warning("deprecated use of sudo(user), use with_user(user) instead", stack_info=True)
5038            return self.with_user(flag)
5039        return self.with_env(self.env(su=flag))
5040
5041    def with_user(self, user):
5042        """ with_user(user)
5043
5044        Return a new version of this recordset attached to the given user, in
5045        non-superuser mode, unless `user` is the superuser (by convention, the
5046        superuser is always in superuser mode.)
5047        """
5048        if not user:
5049            return self
5050        return self.with_env(self.env(user=user, su=False))
5051
5052    def with_company(self, company):
5053        """ with_company(company)
5054
5055        Return a new version of this recordset with a modified context, such that::
5056
5057            result.env.company = company
5058            result.env.companies = self.env.companies | company
5059
5060        :param company: main company of the new environment.
5061        :type company: :class:`~odoo.addons.base.models.res_company` or int
5062
5063        .. warning::
5064
5065            When using an unauthorized company for current user,
5066            accessing the company(ies) on the environment may trigger
5067            an AccessError if not done in a sudoed environment.
5068        """
5069        if not company:
5070            # With company = None/False/0/[]/empty recordset: keep current environment
5071            return self
5072
5073        company_id = int(company)
5074        allowed_company_ids = self.env.context.get('allowed_company_ids', [])
5075        if allowed_company_ids and company_id == allowed_company_ids[0]:
5076            return self
5077        # Copy the allowed_company_ids list
5078        # to avoid modifying the context of the current environment.
5079        allowed_company_ids = list(allowed_company_ids)
5080        if company_id in allowed_company_ids:
5081            allowed_company_ids.remove(company_id)
5082        allowed_company_ids.insert(0, company_id)
5083
5084        return self.with_context(allowed_company_ids=allowed_company_ids)
5085
5086    def with_context(self, *args, **kwargs):
5087        """ with_context([context][, **overrides]) -> records
5088
5089        Returns a new version of this recordset attached to an extended
5090        context.
5091
5092        The extended context is either the provided ``context`` in which
5093        ``overrides`` are merged or the *current* context in which
5094        ``overrides`` are merged e.g.::
5095
5096            # current context is {'key1': True}
5097            r2 = records.with_context({}, key2=True)
5098            # -> r2._context is {'key2': True}
5099            r2 = records.with_context(key2=True)
5100            # -> r2._context is {'key1': True, 'key2': True}
5101
5102        .. note:
5103
5104            The returned recordset has the same prefetch object as ``self``.
5105        """
5106        if (args and 'force_company' in args[0]) or 'force_company' in kwargs:
5107            _logger.warning(
5108                "Context key 'force_company' is no longer supported. "
5109                "Use with_company(company) instead.",
5110                stack_info=True,
5111            )
5112        if (args and 'company' in args[0]) or 'company' in kwargs:
5113            _logger.warning(
5114                "Context key 'company' is not recommended, because "
5115                "of its special meaning in @depends_context.",
5116                stack_info=True,
5117            )
5118        context = dict(args[0] if args else self._context, **kwargs)
5119        if 'allowed_company_ids' not in context and 'allowed_company_ids' in self._context:
5120            # Force 'allowed_company_ids' to be kept when context is overridden
5121            # without 'allowed_company_ids'
5122            context['allowed_company_ids'] = self._context['allowed_company_ids']
5123        return self.with_env(self.env(context=context))
5124
5125    def with_prefetch(self, prefetch_ids=None):
5126        """ with_prefetch([prefetch_ids]) -> records
5127
5128        Return a new version of this recordset that uses the given prefetch ids,
5129        or ``self``'s ids if not given.
5130        """
5131        if prefetch_ids is None:
5132            prefetch_ids = self._ids
5133        return self._browse(self.env, self._ids, prefetch_ids)
5134
5135    def _update_cache(self, values, validate=True):
5136        """ Update the cache of ``self`` with ``values``.
5137
5138            :param values: dict of field values, in any format.
5139            :param validate: whether values must be checked
5140        """
5141        def is_monetary(pair):
5142            return pair[0].type == 'monetary'
5143
5144        self.ensure_one()
5145        cache = self.env.cache
5146        fields = self._fields
5147        try:
5148            field_values = [(fields[name], value) for name, value in values.items()]
5149        except KeyError as e:
5150            raise ValueError("Invalid field %r on model %r" % (e.args[0], self._name))
5151
5152        # convert monetary fields last in order to ensure proper rounding
5153        for field, value in sorted(field_values, key=is_monetary):
5154            cache.set(self, field, field.convert_to_cache(value, self, validate))
5155
5156            # set inverse fields on new records in the comodel
5157            if field.relational:
5158                inv_recs = self[field.name].filtered(lambda r: not r.id)
5159                if not inv_recs:
5160                    continue
5161                for invf in self._field_inverses[field]:
5162                    # DLE P98: `test_40_new_fields`
5163                    # /home/dle/src/odoo/master-nochange-fp/odoo/addons/test_new_api/tests/test_new_fields.py
5164                    # Be careful to not break `test_onchange_taxes_1`, `test_onchange_taxes_2`, `test_onchange_taxes_3`
5165                    # If you attempt to find a better solution
5166                    for inv_rec in inv_recs:
5167                        if not cache.contains(inv_rec, invf):
5168                            val = invf.convert_to_cache(self, inv_rec, validate=False)
5169                            cache.set(inv_rec, invf, val)
5170                        else:
5171                            invf._update(inv_rec, self)
5172
5173    def _convert_to_record(self, values):
5174        """ Convert the ``values`` dictionary from the cache format to the
5175        record format.
5176        """
5177        return {
5178            name: self._fields[name].convert_to_record(value, self)
5179            for name, value in values.items()
5180        }
5181
5182    def _convert_to_write(self, values):
5183        """ Convert the ``values`` dictionary into the format of :meth:`write`. """
5184        fields = self._fields
5185        result = {}
5186        for name, value in values.items():
5187            if name in fields:
5188                field = fields[name]
5189                value = field.convert_to_write(value, self)
5190                if not isinstance(value, NewId):
5191                    result[name] = value
5192        return result
5193
5194    #
5195    # Record traversal and update
5196    #
5197
5198    def _mapped_func(self, func):
5199        """ Apply function ``func`` on all records in ``self``, and return the
5200            result as a list or a recordset (if ``func`` returns recordsets).
5201        """
5202        if self:
5203            vals = [func(rec) for rec in self]
5204            if isinstance(vals[0], BaseModel):
5205                return vals[0].union(*vals)         # union of all recordsets
5206            return vals
5207        else:
5208            vals = func(self)
5209            return vals if isinstance(vals, BaseModel) else []
5210
5211    def mapped(self, func):
5212        """Apply ``func`` on all records in ``self``, and return the result as a
5213        list or a recordset (if ``func`` return recordsets). In the latter
5214        case, the order of the returned recordset is arbitrary.
5215
5216        :param func: a function or a dot-separated sequence of field names
5217        :type func: callable or str
5218        :return: self if func is falsy, result of func applied to all ``self`` records.
5219        :rtype: list or recordset
5220
5221        .. code-block:: python3
5222
5223            # returns a list of summing two fields for each record in the set
5224            records.mapped(lambda r: r.field1 + r.field2)
5225
5226        The provided function can be a string to get field values:
5227
5228        .. code-block:: python3
5229
5230            # returns a list of names
5231            records.mapped('name')
5232
5233            # returns a recordset of partners
5234            records.mapped('partner_id')
5235
5236            # returns the union of all partner banks, with duplicates removed
5237            records.mapped('partner_id.bank_ids')
5238        """
5239        if not func:
5240            return self                 # support for an empty path of fields
5241        if isinstance(func, str):
5242            recs = self
5243            for name in func.split('.'):
5244                recs = recs._fields[name].mapped(recs)
5245            return recs
5246        else:
5247            return self._mapped_func(func)
5248
5249    def _mapped_cache(self, name_seq):
5250        """ Same as `~.mapped`, but ``name_seq`` is a dot-separated sequence of
5251            field names, and only cached values are used.
5252        """
5253        recs = self
5254        for name in name_seq.split('.'):
5255            field = recs._fields[name]
5256            null = field.convert_to_cache(False, self, validate=False)
5257            if recs:
5258                recs = recs.mapped(lambda rec: field.convert_to_record(rec._cache.get(name, null), rec))
5259            else:
5260                recs = field.convert_to_record(null, recs)
5261        return recs
5262
5263    def filtered(self, func):
5264        """Return the records in ``self`` satisfying ``func``.
5265
5266        :param func: a function or a dot-separated sequence of field names
5267        :type func: callable or str
5268        :return: recordset of records satisfying func, may be empty.
5269
5270        .. code-block:: python3
5271
5272            # only keep records whose company is the current user's
5273            records.filtered(lambda r: r.company_id == user.company_id)
5274
5275            # only keep records whose partner is a company
5276            records.filtered("partner_id.is_company")
5277        """
5278        if isinstance(func, str):
5279            name = func
5280            func = lambda rec: any(rec.mapped(name))
5281            # populate cache
5282            self.mapped(name)
5283        return self.browse([rec.id for rec in self if func(rec)])
5284
5285    def filtered_domain(self, domain):
5286        if not domain: return self
5287        result = []
5288        for d in reversed(domain):
5289            if d == '|':
5290                result.append(result.pop() | result.pop())
5291            elif d == '!':
5292                result.append(self - result.pop())
5293            elif d == '&':
5294                result.append(result.pop() & result.pop())
5295            elif d == expression.TRUE_LEAF:
5296                result.append(self)
5297            elif d == expression.FALSE_LEAF:
5298                result.append(self.browse())
5299            else:
5300                (key, comparator, value) = d
5301                if comparator in ('child_of', 'parent_of'):
5302                    result.append(self.search([('id', 'in', self.ids), d]))
5303                    continue
5304                if key.endswith('.id'):
5305                    key = key[:-3]
5306                if key == 'id':
5307                    key = ''
5308                # determine the field with the final type for values
5309                field = None
5310                if key:
5311                    model = self.browse()
5312                    for fname in key.split('.'):
5313                        field = model._fields[fname]
5314                        model = model[fname]
5315                if comparator in ('like', 'ilike', '=like', '=ilike', 'not ilike', 'not like'):
5316                    value_esc = value.replace('_', '?').replace('%', '*').replace('[', '?')
5317                records_ids = OrderedSet()
5318                for rec in self:
5319                    data = rec.mapped(key)
5320                    if isinstance(data, BaseModel):
5321                        v = value
5322                        if (isinstance(value, list) or isinstance(value, tuple)) and len(value):
5323                            v = value[0]
5324                        if isinstance(v, str):
5325                            data = data.mapped('display_name')
5326                        else:
5327                            data = data and data.ids or [False]
5328                    elif field and field.type in ('date', 'datetime'):
5329                        # convert all date and datetime values to datetime
5330                        normalize = Datetime.to_datetime
5331                        if isinstance(value, (list, tuple)):
5332                            value = [normalize(v) for v in value]
5333                        else:
5334                            value = normalize(value)
5335                        data = [normalize(d) for d in data]
5336                    if comparator in ('in', 'not in'):
5337                        if not (isinstance(value, list) or isinstance(value, tuple)):
5338                            value = [value]
5339
5340                    if comparator == '=':
5341                        ok = value in data
5342                    elif comparator == 'in':
5343                        ok = any(map(lambda x: x in data, value))
5344                    elif comparator == '<':
5345                        ok = any(map(lambda x: x is not None and x < value, data))
5346                    elif comparator == '>':
5347                        ok = any(map(lambda x: x is not None and x > value, data))
5348                    elif comparator == '<=':
5349                        ok = any(map(lambda x: x is not None and x <= value, data))
5350                    elif comparator == '>=':
5351                        ok = any(map(lambda x: x is not None and x >= value, data))
5352                    elif comparator in ('!=', '<>'):
5353                        ok = value not in data
5354                    elif comparator == 'not in':
5355                        ok = all(map(lambda x: x not in data, value))
5356                    elif comparator == 'not ilike':
5357                        data = [(x or "") for x in data]
5358                        ok = all(map(lambda x: value.lower() not in x.lower(), data))
5359                    elif comparator == 'ilike':
5360                        data = [(x or "").lower() for x in data]
5361                        ok = bool(fnmatch.filter(data, '*'+(value_esc or '').lower()+'*'))
5362                    elif comparator == 'not like':
5363                        data = [(x or "") for x in data]
5364                        ok = all(map(lambda x: value not in x, data))
5365                    elif comparator == 'like':
5366                        data = [(x or "") for x in data]
5367                        ok = bool(fnmatch.filter(data, value and '*'+value_esc+'*'))
5368                    elif comparator == '=?':
5369                        ok = (value in data) or not value
5370                    elif comparator in ('=like'):
5371                        data = [(x or "") for x in data]
5372                        ok = bool(fnmatch.filter(data, value_esc))
5373                    elif comparator in ('=ilike'):
5374                        data = [(x or "").lower() for x in data]
5375                        ok = bool(fnmatch.filter(data, value and value_esc.lower()))
5376                    else:
5377                        raise ValueError
5378                    if ok:
5379                       records_ids.add(rec.id)
5380                result.append(self.browse(records_ids))
5381        while len(result)>1:
5382            result.append(result.pop() & result.pop())
5383        return result[0]
5384
5385
5386    def sorted(self, key=None, reverse=False):
5387        """Return the recordset ``self`` ordered by ``key``.
5388
5389        :param key: either a function of one argument that returns a
5390            comparison key for each record, or a field name, or ``None``, in
5391            which case records are ordered according the default model's order
5392        :type key: callable or str or None
5393        :param bool reverse: if ``True``, return the result in reverse order
5394
5395        .. code-block:: python3
5396
5397            # sort records by name
5398            records.sorted(key=lambda r: r.name)
5399        """
5400        if key is None:
5401            recs = self.search([('id', 'in', self.ids)])
5402            return self.browse(reversed(recs._ids)) if reverse else recs
5403        if isinstance(key, str):
5404            key = itemgetter(key)
5405        return self.browse(item.id for item in sorted(self, key=key, reverse=reverse))
5406
5407    def update(self, values):
5408        """ Update the records in ``self`` with ``values``. """
5409        for record in self:
5410            for name, value in values.items():
5411                record[name] = value
5412
5413    @api.model
5414    def flush(self, fnames=None, records=None):
5415        """ Process all the pending computations (on all models), and flush all
5416        the pending updates to the database.
5417
5418        :param fnames (list<str>): list of field names to flush.  If given,
5419            limit the processing to the given fields of the current model.
5420        :param records (Model): if given (together with ``fnames``), limit the
5421            processing to the given records.
5422        """
5423        def process(model, id_vals):
5424            # group record ids by vals, to update in batch when possible
5425            updates = defaultdict(list)
5426            for rid, vals in id_vals.items():
5427                updates[frozendict(vals)].append(rid)
5428
5429            for vals, ids in updates.items():
5430                recs = model.browse(ids)
5431                try:
5432                    recs._write(vals)
5433                except MissingError:
5434                    recs.exists()._write(vals)
5435
5436        if fnames is None:
5437            # flush everything
5438            self.recompute()
5439            while self.env.all.towrite:
5440                model_name, id_vals = self.env.all.towrite.popitem()
5441                process(self.env[model_name], id_vals)
5442        else:
5443            # flush self's model if any of the fields must be flushed
5444            self.recompute(fnames, records=records)
5445
5446            # check whether any of 'records' must be flushed
5447            if records is not None:
5448                fnames = set(fnames)
5449                towrite = self.env.all.towrite.get(self._name)
5450                if not towrite or all(
5451                    fnames.isdisjoint(towrite.get(record.id, ()))
5452                    for record in records
5453                ):
5454                    return
5455
5456            # DLE P76: test_onchange_one2many_with_domain_on_related_field
5457            # ```
5458            # email.important = True
5459            # self.assertIn(email, discussion.important_emails)
5460            # ```
5461            # When a search on a field coming from a related occurs (the domain
5462            # on discussion.important_emails field), make sure the related field
5463            # is flushed
5464            model_fields = {}
5465            for fname in fnames:
5466                field = self._fields[fname]
5467                model_fields.setdefault(field.model_name, []).append(field)
5468                if field.related_field:
5469                    model_fields.setdefault(field.related_field.model_name, []).append(field.related_field)
5470            for model_name, fields in model_fields.items():
5471                if any(
5472                    field.name in vals
5473                    for vals in self.env.all.towrite.get(model_name, {}).values()
5474                    for field in fields
5475                ):
5476                    id_vals = self.env.all.towrite.pop(model_name)
5477                    process(self.env[model_name], id_vals)
5478
5479            # missing for one2many fields, flush their inverse
5480            for fname in fnames:
5481                field = self._fields[fname]
5482                if field.type == 'one2many' and field.inverse_name:
5483                    self.env[field.comodel_name].flush([field.inverse_name])
5484
5485    #
5486    # New records - represent records that do not exist in the database yet;
5487    # they are used to perform onchanges.
5488    #
5489
5490    @api.model
5491    def new(self, values={}, origin=None, ref=None):
5492        """ new([values], [origin], [ref]) -> record
5493
5494        Return a new record instance attached to the current environment and
5495        initialized with the provided ``value``. The record is *not* created
5496        in database, it only exists in memory.
5497
5498        One can pass an ``origin`` record, which is the actual record behind the
5499        result. It is retrieved as ``record._origin``. Two new records with the
5500        same origin record are considered equal.
5501
5502        One can also pass a ``ref`` value to identify the record among other new
5503        records. The reference is encapsulated in the ``id`` of the record.
5504        """
5505        if origin is not None:
5506            origin = origin.id
5507        record = self.browse([NewId(origin, ref)])
5508        record._update_cache(values, validate=False)
5509
5510        return record
5511
5512    @property
5513    def _origin(self):
5514        """ Return the actual records corresponding to ``self``. """
5515        ids = tuple(origin_ids(self._ids))
5516        prefetch_ids = IterableGenerator(origin_ids, self._prefetch_ids)
5517        return self._browse(self.env, ids, prefetch_ids)
5518
5519    #
5520    # "Dunder" methods
5521    #
5522
5523    def __bool__(self):
5524        """ Test whether ``self`` is nonempty. """
5525        return bool(getattr(self, '_ids', True))
5526    __nonzero__ = __bool__
5527
5528    def __len__(self):
5529        """ Return the size of ``self``. """
5530        return len(self._ids)
5531
5532    def __iter__(self):
5533        """ Return an iterator over ``self``. """
5534        if len(self._ids) > PREFETCH_MAX and self._prefetch_ids is self._ids:
5535            for ids in self.env.cr.split_for_in_conditions(self._ids):
5536                for id_ in ids:
5537                    yield self._browse(self.env, (id_,), ids)
5538        else:
5539            for id in self._ids:
5540                yield self._browse(self.env, (id,), self._prefetch_ids)
5541
5542    def __contains__(self, item):
5543        """ Test whether ``item`` (record or field name) is an element of ``self``.
5544            In the first case, the test is fully equivalent to::
5545
5546                any(item == record for record in self)
5547        """
5548        if isinstance(item, BaseModel) and self._name == item._name:
5549            return len(item) == 1 and item.id in self._ids
5550        elif isinstance(item, str):
5551            return item in self._fields
5552        else:
5553            raise TypeError("Mixing apples and oranges: %s in %s" % (item, self))
5554
5555    def __add__(self, other):
5556        """ Return the concatenation of two recordsets. """
5557        return self.concat(other)
5558
5559    def concat(self, *args):
5560        """ Return the concatenation of ``self`` with all the arguments (in
5561            linear time complexity).
5562        """
5563        ids = list(self._ids)
5564        for arg in args:
5565            if not (isinstance(arg, BaseModel) and arg._name == self._name):
5566                raise TypeError("Mixing apples and oranges: %s.concat(%s)" % (self, arg))
5567            ids.extend(arg._ids)
5568        return self.browse(ids)
5569
5570    def __sub__(self, other):
5571        """ Return the recordset of all the records in ``self`` that are not in
5572            ``other``. Note that recordset order is preserved.
5573        """
5574        if not isinstance(other, BaseModel) or self._name != other._name:
5575            raise TypeError("Mixing apples and oranges: %s - %s" % (self, other))
5576        other_ids = set(other._ids)
5577        return self.browse([id for id in self._ids if id not in other_ids])
5578
5579    def __and__(self, other):
5580        """ Return the intersection of two recordsets.
5581            Note that first occurrence order is preserved.
5582        """
5583        if not isinstance(other, BaseModel) or self._name != other._name:
5584            raise TypeError("Mixing apples and oranges: %s & %s" % (self, other))
5585        other_ids = set(other._ids)
5586        return self.browse(OrderedSet(id for id in self._ids if id in other_ids))
5587
5588    def __or__(self, other):
5589        """ Return the union of two recordsets.
5590            Note that first occurrence order is preserved.
5591        """
5592        return self.union(other)
5593
5594    def union(self, *args):
5595        """ Return the union of ``self`` with all the arguments (in linear time
5596            complexity, with first occurrence order preserved).
5597        """
5598        ids = list(self._ids)
5599        for arg in args:
5600            if not (isinstance(arg, BaseModel) and arg._name == self._name):
5601                raise TypeError("Mixing apples and oranges: %s.union(%s)" % (self, arg))
5602            ids.extend(arg._ids)
5603        return self.browse(OrderedSet(ids))
5604
5605    def __eq__(self, other):
5606        """ Test whether two recordsets are equivalent (up to reordering). """
5607        if not isinstance(other, BaseModel):
5608            if other:
5609                filename, lineno = frame_codeinfo(currentframe(), 1)
5610                _logger.warning("Comparing apples and oranges: %r == %r (%s:%s)",
5611                                self, other, filename, lineno)
5612            return NotImplemented
5613        return self._name == other._name and set(self._ids) == set(other._ids)
5614
5615    def __lt__(self, other):
5616        if not isinstance(other, BaseModel) or self._name != other._name:
5617            return NotImplemented
5618        return set(self._ids) < set(other._ids)
5619
5620    def __le__(self, other):
5621        if not isinstance(other, BaseModel) or self._name != other._name:
5622            return NotImplemented
5623        # these are much cheaper checks than a proper subset check, so
5624        # optimise for checking if a null or singleton are subsets of a
5625        # recordset
5626        if not self or self in other:
5627            return True
5628        return set(self._ids) <= set(other._ids)
5629
5630    def __gt__(self, other):
5631        if not isinstance(other, BaseModel) or self._name != other._name:
5632            return NotImplemented
5633        return set(self._ids) > set(other._ids)
5634
5635    def __ge__(self, other):
5636        if not isinstance(other, BaseModel) or self._name != other._name:
5637            return NotImplemented
5638        if not other or other in self:
5639            return True
5640        return set(self._ids) >= set(other._ids)
5641
5642    def __int__(self):
5643        return self.id or 0
5644
5645    def __repr__(self):
5646        return "%s%s" % (self._name, getattr(self, '_ids', ""))
5647
5648    def __hash__(self):
5649        if hasattr(self, '_ids'):
5650            return hash((self._name, frozenset(self._ids)))
5651        else:
5652            return hash(self._name)
5653
5654    def __getitem__(self, key):
5655        """ If ``key`` is an integer or a slice, return the corresponding record
5656            selection as an instance (attached to ``self.env``).
5657            Otherwise read the field ``key`` of the first record in ``self``.
5658
5659            Examples::
5660
5661                inst = model.search(dom)    # inst is a recordset
5662                r4 = inst[3]                # fourth record in inst
5663                rs = inst[10:20]            # subset of inst
5664                nm = rs['name']             # name of first record in inst
5665        """
5666        if isinstance(key, str):
5667            # important: one must call the field's getter
5668            return self._fields[key].__get__(self, type(self))
5669        elif isinstance(key, slice):
5670            return self.browse(self._ids[key])
5671        else:
5672            return self.browse((self._ids[key],))
5673
5674    def __setitem__(self, key, value):
5675        """ Assign the field ``key`` to ``value`` in record ``self``. """
5676        # important: one must call the field's setter
5677        return self._fields[key].__set__(self, value)
5678
5679    #
5680    # Cache and recomputation management
5681    #
5682
5683    @property
5684    def _cache(self):
5685        """ Return the cache of ``self``, mapping field names to values. """
5686        return RecordCache(self)
5687
5688    def _in_cache_without(self, field, limit=PREFETCH_MAX):
5689        """ Return records to prefetch that have no value in cache for ``field``
5690            (:class:`Field` instance), including ``self``.
5691            Return at most ``limit`` records.
5692        """
5693        ids = expand_ids(self.id, self._prefetch_ids)
5694        ids = self.env.cache.get_missing_ids(self.browse(ids), field)
5695        if limit:
5696            ids = itertools.islice(ids, limit)
5697        # Those records are aimed at being either fetched, or computed.  But the
5698        # method '_fetch_field' is not correct with new records: it considers
5699        # them as forbidden records, and clears their cache!  On the other hand,
5700        # compute methods are not invoked with a mix of real and new records for
5701        # the sake of code simplicity.
5702        return self.browse(ids)
5703
5704    @api.model
5705    def refresh(self):
5706        """ Clear the records cache.
5707
5708            .. deprecated:: 8.0
5709                The record cache is automatically invalidated.
5710        """
5711        self.invalidate_cache()
5712
5713    @api.model
5714    def invalidate_cache(self, fnames=None, ids=None):
5715        """ Invalidate the record caches after some records have been modified.
5716            If both ``fnames`` and ``ids`` are ``None``, the whole cache is cleared.
5717
5718            :param fnames: the list of modified fields, or ``None`` for all fields
5719            :param ids: the list of modified record ids, or ``None`` for all
5720        """
5721        if fnames is None:
5722            if ids is None:
5723                return self.env.cache.invalidate()
5724            fields = list(self._fields.values())
5725        else:
5726            fields = [self._fields[n] for n in fnames]
5727
5728        # invalidate fields and inverse fields, too
5729        spec = [(f, ids) for f in fields] + \
5730               [(invf, None) for f in fields for invf in self._field_inverses[f]]
5731        self.env.cache.invalidate(spec)
5732
5733    def modified(self, fnames, create=False, before=False):
5734        """ Notify that fields will be or have been modified on ``self``. This
5735        invalidates the cache where necessary, and prepares the recomputation of
5736        dependent stored fields.
5737
5738        :param fnames: iterable of field names modified on records ``self``
5739        :param create: whether called in the context of record creation
5740        :param before: whether called before modifying records ``self``
5741        """
5742        if not self or not fnames:
5743            return
5744
5745        # The triggers of a field F is a tree that contains the fields that
5746        # depend on F, together with the fields to inverse to find out which
5747        # records to recompute.
5748        #
5749        # For instance, assume that G depends on F, H depends on X.F, I depends
5750        # on W.X.F, and J depends on Y.F. The triggers of F will be the tree:
5751        #
5752        #                              [G]
5753        #                            X/   \Y
5754        #                          [H]     [J]
5755        #                        W/
5756        #                      [I]
5757        #
5758        # This tree provides perfect support for the trigger mechanism:
5759        # when F is # modified on records,
5760        #  - mark G to recompute on records,
5761        #  - mark H to recompute on inverse(X, records),
5762        #  - mark I to recompute on inverse(W, inverse(X, records)),
5763        #  - mark J to recompute on inverse(Y, records).
5764        if len(fnames) == 1:
5765            tree = self.pool.field_triggers.get(self._fields[next(iter(fnames))])
5766        else:
5767            # merge dependency trees to evaluate all triggers at once
5768            tree = {}
5769            for fname in fnames:
5770                node = self.pool.field_triggers.get(self._fields[fname])
5771                if node:
5772                    trigger_tree_merge(tree, node)
5773
5774        if tree:
5775            # determine what to compute (through an iterator)
5776            tocompute = self.sudo().with_context(active_test=False)._modified_triggers(tree, create)
5777
5778            # When called after modification, one should traverse backwards
5779            # dependencies by taking into account all fields already known to be
5780            # recomputed.  In that case, we mark fieds to compute as soon as
5781            # possible.
5782            #
5783            # When called before modification, one should mark fields to compute
5784            # after having inversed all dependencies.  This is because we
5785            # determine what currently depends on self, and it should not be
5786            # recomputed before the modification!
5787            if before:
5788                tocompute = list(tocompute)
5789
5790            # process what to compute
5791            for field, records, create in tocompute:
5792                records -= self.env.protected(field)
5793                if not records:
5794                    continue
5795                if field.compute and field.store:
5796                    if field.recursive:
5797                        recursively_marked = self.env.not_to_compute(field, records)
5798                    self.env.add_to_compute(field, records)
5799                else:
5800                    # Dont force the recomputation of compute fields which are
5801                    # not stored as this is not really necessary.
5802                    if field.recursive:
5803                        recursively_marked = records & self.env.cache.get_records(records, field)
5804                    self.env.cache.invalidate([(field, records._ids)])
5805                # recursively trigger recomputation of field's dependents
5806                if field.recursive:
5807                    recursively_marked.modified([field.name], create)
5808
5809    def _modified_triggers(self, tree, create=False):
5810        """ Return an iterator traversing a tree of field triggers on ``self``,
5811        traversing backwards field dependencies along the way, and yielding
5812        tuple ``(field, records, created)`` to recompute.
5813        """
5814        if not self:
5815            return
5816
5817        # first yield what to compute
5818        for field in tree.get(None, ()):
5819            yield field, self, create
5820
5821        # then traverse dependencies backwards, and proceed recursively
5822        for key, val in tree.items():
5823            if key is None:
5824                continue
5825            elif create and key.type in ('many2one', 'many2one_reference'):
5826                # upon creation, no other record has a reference to self
5827                continue
5828            else:
5829                # val is another tree of dependencies
5830                model = self.env[key.model_name]
5831                for invf in model._field_inverses[key]:
5832                    # use an inverse of field without domain
5833                    if not (invf.type in ('one2many', 'many2many') and invf.domain):
5834                        if invf.type == 'many2one_reference':
5835                            rec_ids = set()
5836                            for rec in self:
5837                                try:
5838                                    if rec[invf.model_field] == key.model_name:
5839                                        rec_ids.add(rec[invf.name])
5840                                except MissingError:
5841                                    continue
5842                            records = model.browse(rec_ids)
5843                        else:
5844                            try:
5845                                records = self[invf.name]
5846                            except MissingError:
5847                                records = self.exists()[invf.name]
5848
5849                        # TODO: find a better fix
5850                        if key.model_name == records._name:
5851                            if not any(self._ids):
5852                                # if self are new, records should be new as well
5853                                records = records.browse(it and NewId(it) for it in records._ids)
5854                            break
5855                else:
5856                    new_records = self.filtered(lambda r: not r.id)
5857                    real_records = self - new_records
5858                    records = model.browse()
5859                    if real_records:
5860                        records |= model.search([(key.name, 'in', real_records.ids)], order='id')
5861                    if new_records:
5862                        cache_records = self.env.cache.get_records(model, key)
5863                        records |= cache_records.filtered(lambda r: set(r[key.name]._ids) & set(self._ids))
5864                yield from records._modified_triggers(val)
5865
5866    @api.model
5867    def recompute(self, fnames=None, records=None):
5868        """ Recompute all function fields (or the given ``fnames`` if present).
5869            The fields and records to recompute have been determined by method
5870            :meth:`modified`.
5871        """
5872        def process(field):
5873            recs = self.env.records_to_compute(field)
5874            if not recs:
5875                return
5876            if field.compute and field.store:
5877                # do not force recomputation on new records; those will be
5878                # recomputed by accessing the field on the records
5879                recs = recs.filtered('id')
5880                try:
5881                    field.recompute(recs)
5882                except MissingError:
5883                    existing = recs.exists()
5884                    field.recompute(existing)
5885                    # mark the field as computed on missing records, otherwise
5886                    # they remain forever in the todo list, and lead to an
5887                    # infinite loop...
5888                    for f in recs.pool.field_computed[field]:
5889                        self.env.remove_to_compute(f, recs - existing)
5890            else:
5891                self.env.cache.invalidate([(field, recs._ids)])
5892                self.env.remove_to_compute(field, recs)
5893
5894        if fnames is None:
5895            # recompute everything
5896            for field in list(self.env.fields_to_compute()):
5897                process(field)
5898        else:
5899            fields = [self._fields[fname] for fname in fnames]
5900
5901            # check whether any 'records' must be computed
5902            if records is not None and not any(
5903                records & self.env.records_to_compute(field)
5904                for field in fields
5905            ):
5906                return
5907
5908            # recompute the given fields on self's model
5909            for field in fields:
5910                process(field)
5911
5912    #
5913    # Generic onchange method
5914    #
5915
5916    def _dependent_fields(self, field):
5917        """ Return an iterator on the fields that depend on ``field``. """
5918        def traverse(node):
5919            for key, val in node.items():
5920                if key is None:
5921                    yield from val
5922                else:
5923                    yield from traverse(val)
5924        return traverse(self.pool.field_triggers.get(field, {}))
5925
5926    def _has_onchange(self, field, other_fields):
5927        """ Return whether ``field`` should trigger an onchange event in the
5928            presence of ``other_fields``.
5929        """
5930        return (field.name in self._onchange_methods) or any(
5931            dep in other_fields for dep in self._dependent_fields(field.base_field)
5932        )
5933
5934    @api.model
5935    def _onchange_spec(self, view_info=None):
5936        """ Return the onchange spec from a view description; if not given, the
5937            result of ``self.fields_view_get()`` is used.
5938        """
5939        result = {}
5940
5941        # for traversing the XML arch and populating result
5942        def process(node, info, prefix):
5943            if node.tag == 'field':
5944                name = node.attrib['name']
5945                names = "%s.%s" % (prefix, name) if prefix else name
5946                if not result.get(names):
5947                    result[names] = node.attrib.get('on_change')
5948                # traverse the subviews included in relational fields
5949                for subinfo in info['fields'][name].get('views', {}).values():
5950                    process(etree.fromstring(subinfo['arch']), subinfo, names)
5951            else:
5952                for child in node:
5953                    process(child, info, prefix)
5954
5955        if view_info is None:
5956            view_info = self.fields_view_get()
5957        process(etree.fromstring(view_info['arch']), view_info, '')
5958        return result
5959
5960    def _onchange_eval(self, field_name, onchange, result):
5961        """ Apply onchange method(s) for field ``field_name`` with spec ``onchange``
5962            on record ``self``. Value assignments are applied on ``self``, while
5963            domain and warning messages are put in dictionary ``result``.
5964        """
5965        onchange = onchange.strip()
5966
5967        def process(res):
5968            if not res:
5969                return
5970            if res.get('value'):
5971                res['value'].pop('id', None)
5972                self.update({key: val for key, val in res['value'].items() if key in self._fields})
5973            if res.get('domain'):
5974                _logger.warning(
5975                    "onchange method %s returned a domain, this is deprecated",
5976                    method.__qualname__
5977                )
5978                result.setdefault('domain', {}).update(res['domain'])
5979            if res.get('warning'):
5980                result['warnings'].add((
5981                    res['warning'].get('title') or _("Warning"),
5982                    res['warning'].get('message') or "",
5983                    res['warning'].get('type') or "",
5984                ))
5985
5986        if onchange in ("1", "true"):
5987            for method in self._onchange_methods.get(field_name, ()):
5988                method_res = method(self)
5989                process(method_res)
5990            return
5991
5992    def onchange(self, values, field_name, field_onchange):
5993        """ Perform an onchange on the given field.
5994
5995            :param values: dictionary mapping field names to values, giving the
5996                current state of modification
5997            :param field_name: name of the modified field, or list of field
5998                names (in view order), or False
5999            :param field_onchange: dictionary mapping field names to their
6000                on_change attribute
6001
6002            When ``field_name`` is falsy, the method first adds default values
6003            to ``values``, computes the remaining fields, applies onchange
6004            methods to them, and return all the fields in ``field_onchange``.
6005        """
6006        # this is for tests using `Form`
6007        self.flush()
6008
6009        env = self.env
6010        if isinstance(field_name, list):
6011            names = field_name
6012        elif field_name:
6013            names = [field_name]
6014        else:
6015            names = []
6016
6017        first_call = not names
6018
6019        if any(name not in self._fields for name in names):
6020            return {}
6021
6022        def PrefixTree(model, dotnames):
6023            """ Return a prefix tree for sequences of field names. """
6024            if not dotnames:
6025                return {}
6026            # group dotnames by prefix
6027            suffixes = defaultdict(list)
6028            for dotname in dotnames:
6029                # name, *names = dotname.split('.', 1)
6030                names = dotname.split('.', 1)
6031                name = names.pop(0)
6032                suffixes[name].extend(names)
6033            # fill in prefix tree in fields order
6034            tree = OrderedDict()
6035            for name, field in model._fields.items():
6036                if name in suffixes:
6037                    tree[name] = subtree = PrefixTree(model[name], suffixes[name])
6038                    if subtree and field.type == 'one2many':
6039                        subtree.pop(field.inverse_name, None)
6040            return tree
6041
6042        class Snapshot(dict):
6043            """ A dict with the values of a record, following a prefix tree. """
6044            __slots__ = ()
6045
6046            def __init__(self, record, tree, fetch=True):
6047                # put record in dict to include it when comparing snapshots
6048                super(Snapshot, self).__init__({'<record>': record, '<tree>': tree})
6049                if fetch:
6050                    for name in tree:
6051                        self.fetch(name)
6052
6053            def fetch(self, name):
6054                """ Set the value of field ``name`` from the record's value. """
6055                record = self['<record>']
6056                tree = self['<tree>']
6057                if record._fields[name].type in ('one2many', 'many2many'):
6058                    # x2many fields are serialized as a list of line snapshots
6059                    self[name] = [Snapshot(line, tree[name]) for line in record[name]]
6060                else:
6061                    self[name] = record[name]
6062
6063            def has_changed(self, name):
6064                """ Return whether a field on record has changed. """
6065                if name not in self:
6066                    return True
6067                record = self['<record>']
6068                subnames = self['<tree>'][name]
6069                if record._fields[name].type not in ('one2many', 'many2many'):
6070                    return self[name] != record[name]
6071                return (
6072                    len(self[name]) != len(record[name])
6073                    or (
6074                        set(line_snapshot["<record>"].id for line_snapshot in self[name])
6075                        != set(record[name]._ids)
6076                    )
6077                    or any(
6078                        line_snapshot.has_changed(subname)
6079                        for line_snapshot in self[name]
6080                        for subname in subnames
6081                    )
6082                )
6083
6084            def diff(self, other, force=False):
6085                """ Return the values in ``self`` that differ from ``other``.
6086                    Requires record cache invalidation for correct output!
6087                """
6088                record = self['<record>']
6089                result = {}
6090                for name, subnames in self['<tree>'].items():
6091                    if name == 'id':
6092                        continue
6093                    if not force and other.get(name) == self[name]:
6094                        continue
6095                    field = record._fields[name]
6096                    if field.type not in ('one2many', 'many2many'):
6097                        result[name] = field.convert_to_onchange(self[name], record, {})
6098                    else:
6099                        # x2many fields: serialize value as commands
6100                        result[name] = commands = [(5,)]
6101                        # The purpose of the following line is to enable the prefetching.
6102                        # In the loop below, line._prefetch_ids actually depends on the
6103                        # value of record[name] in cache (see prefetch_ids on x2many
6104                        # fields).  But the cache has been invalidated before calling
6105                        # diff(), therefore evaluating line._prefetch_ids with an empty
6106                        # cache simply returns nothing, which discards the prefetching
6107                        # optimization!
6108                        record._cache[name] = tuple(
6109                            line_snapshot['<record>'].id for line_snapshot in self[name]
6110                        )
6111                        for line_snapshot in self[name]:
6112                            line = line_snapshot['<record>']
6113                            line = line._origin or line
6114                            if not line.id:
6115                                # new line: send diff from scratch
6116                                line_diff = line_snapshot.diff({})
6117                                commands.append((0, line.id.ref or 0, line_diff))
6118                            else:
6119                                # existing line: check diff from database
6120                                # (requires a clean record cache!)
6121                                line_diff = line_snapshot.diff(Snapshot(line, subnames))
6122                                if line_diff:
6123                                    # send all fields because the web client
6124                                    # might need them to evaluate modifiers
6125                                    line_diff = line_snapshot.diff({})
6126                                    commands.append((1, line.id, line_diff))
6127                                else:
6128                                    commands.append((4, line.id))
6129                return result
6130
6131        nametree = PrefixTree(self.browse(), field_onchange)
6132
6133        if first_call:
6134            names = [name for name in values if name != 'id']
6135            missing_names = [name for name in nametree if name not in values]
6136            defaults = self.default_get(missing_names)
6137            for name in missing_names:
6138                values[name] = defaults.get(name, False)
6139                if name in defaults:
6140                    names.append(name)
6141
6142        # prefetch x2many lines: this speeds up the initial snapshot by avoiding
6143        # to compute fields on new records as much as possible, as that can be
6144        # costly and is not necessary at all
6145        for name, subnames in nametree.items():
6146            if subnames and values.get(name):
6147                # retrieve all line ids in commands
6148                line_ids = set()
6149                for cmd in values[name]:
6150                    if cmd[0] in (1, 4):
6151                        line_ids.add(cmd[1])
6152                    elif cmd[0] == 6:
6153                        line_ids.update(cmd[2])
6154                # prefetch stored fields on lines
6155                lines = self[name].browse(line_ids)
6156                fnames = [subname
6157                          for subname in subnames
6158                          if lines._fields[subname].base_field.store]
6159                lines._read(fnames)
6160                # copy the cache of lines to their corresponding new records;
6161                # this avoids computing computed stored fields on new_lines
6162                new_lines = lines.browse(map(NewId, line_ids))
6163                cache = self.env.cache
6164                for fname in fnames:
6165                    field = lines._fields[fname]
6166                    cache.update(new_lines, field, [
6167                        field.convert_to_cache(value, new_line, validate=False)
6168                        for value, new_line in zip(cache.get_values(lines, field), new_lines)
6169                    ])
6170
6171        # Isolate changed values, to handle inconsistent data sent from the
6172        # client side: when a form view contains two one2many fields that
6173        # overlap, the lines that appear in both fields may be sent with
6174        # different data. Consider, for instance:
6175        #
6176        #   foo_ids: [line with value=1, ...]
6177        #   bar_ids: [line with value=1, ...]
6178        #
6179        # If value=2 is set on 'line' in 'bar_ids', the client sends
6180        #
6181        #   foo_ids: [line with value=1, ...]
6182        #   bar_ids: [line with value=2, ...]
6183        #
6184        # The idea is to put 'foo_ids' in cache first, so that the snapshot
6185        # contains value=1 for line in 'foo_ids'. The snapshot is then updated
6186        # with the value of `bar_ids`, which will contain value=2 on line.
6187        #
6188        # The issue also occurs with other fields. For instance, an onchange on
6189        # a move line has a value for the field 'move_id' that contains the
6190        # values of the move, among which the one2many that contains the line
6191        # itself, with old values!
6192        #
6193        changed_values = {name: values[name] for name in names}
6194        # set changed values to null in initial_values; not setting them
6195        # triggers default_get() on the new record when creating snapshot0
6196        initial_values = dict(values, **dict.fromkeys(names, False))
6197
6198        # do not force delegate fields to False
6199        for name in self._inherits.values():
6200            if not initial_values.get(name, True):
6201                initial_values.pop(name)
6202
6203        # create a new record with values
6204        record = self.new(initial_values, origin=self)
6205
6206        # make a snapshot based on the initial values of record
6207        snapshot0 = Snapshot(record, nametree, fetch=(not first_call))
6208
6209        # store changed values in cache; also trigger recomputations based on
6210        # subfields (e.g., line.a has been modified, line.b is computed stored
6211        # and depends on line.a, but line.b is not in the form view)
6212        record._update_cache(changed_values, validate=False)
6213
6214        # update snapshot0 with changed values
6215        for name in names:
6216            snapshot0.fetch(name)
6217
6218        # Determine which field(s) should be triggered an onchange. On the first
6219        # call, 'names' only contains fields with a default. If 'self' is a new
6220        # line in a one2many field, 'names' also contains the one2many's inverse
6221        # field, and that field may not be in nametree.
6222        todo = list(unique(itertools.chain(names, nametree))) if first_call else list(names)
6223        done = set()
6224
6225        # mark fields to do as modified to trigger recomputations
6226        protected = [self._fields[name] for name in names]
6227        with self.env.protecting(protected, record):
6228            record.modified(todo)
6229            for name in todo:
6230                field = self._fields[name]
6231                if field.inherited:
6232                    # modifying an inherited field should modify the parent
6233                    # record accordingly; because we don't actually assign the
6234                    # modified field on the record, the modification on the
6235                    # parent record has to be done explicitly
6236                    parent = record[field.related[0]]
6237                    parent[name] = record[name]
6238
6239        result = {'warnings': OrderedSet()}
6240
6241        # process names in order
6242        while todo:
6243            # apply field-specific onchange methods
6244            for name in todo:
6245                if field_onchange.get(name):
6246                    record._onchange_eval(name, field_onchange[name], result)
6247                done.add(name)
6248
6249            # determine which fields to process for the next pass
6250            todo = [
6251                name
6252                for name in nametree
6253                if name not in done and snapshot0.has_changed(name)
6254            ]
6255
6256            if not env.context.get('recursive_onchanges', True):
6257                todo = []
6258
6259        # make the snapshot with the final values of record
6260        snapshot1 = Snapshot(record, nametree)
6261
6262        # determine values that have changed by comparing snapshots
6263        self.invalidate_cache()
6264        result['value'] = snapshot1.diff(snapshot0, force=first_call)
6265
6266        # format warnings
6267        warnings = result.pop('warnings')
6268        if len(warnings) == 1:
6269            title, message, type = warnings.pop()
6270            if not type:
6271                type = 'dialog'
6272            result['warning'] = dict(title=title, message=message, type=type)
6273        elif len(warnings) > 1:
6274            # concatenate warning titles and messages
6275            title = _("Warnings")
6276            message = '\n\n'.join([warn_title + '\n\n' + warn_message for warn_title, warn_message, warn_type in warnings])
6277            result['warning'] = dict(title=title, message=message, type='dialog')
6278
6279        return result
6280
6281    def _get_placeholder_filename(self, field=None):
6282        """ Returns the filename of the placeholder to use,
6283            set on web/static/src/img by default, or the
6284            complete path to access it (eg: module/path/to/image.png).
6285        """
6286        return 'placeholder.png'
6287
6288    def _populate_factories(self):
6289        """ Generates a factory for the different fields of the model.
6290
6291        ``factory`` is a generator of values (dict of field values).
6292
6293        Factory skeleton::
6294
6295            def generator(iterator, field_name, model_name):
6296                for counter, values in enumerate(iterator):
6297                    # values.update(dict())
6298                    yield values
6299
6300        See :mod:`odoo.tools.populate` for population tools and applications.
6301
6302        :returns: list of pairs(field_name, factory) where `factory` is a generator function.
6303        :rtype: list(tuple(str, generator))
6304
6305        .. note::
6306
6307            It is the responsibility of the generator to handle the field_name correctly.
6308            The generator could generate values for multiple fields together. In this case,
6309            the field_name should be more a "field_group" (should be begin by a "_"), covering
6310            the different fields updated by the generator (e.g. "_address" for a generator
6311            updating multiple address fields).
6312        """
6313        return []
6314
6315    @property
6316    def _populate_sizes(self):
6317        """ Return a dict mapping symbolic sizes (``'small'``, ``'medium'``, ``'large'``) to integers,
6318        giving the minimal number of records that :meth:`_populate` should create.
6319
6320        The default population sizes are:
6321
6322        * ``small`` : 10
6323        * ``medium`` : 100
6324        * ``large`` : 1000
6325        """
6326        return {
6327            'small': 10,  # minimal representative set
6328            'medium': 100,  # average database load
6329            'large': 1000, # maxi database load
6330        }
6331
6332    @property
6333    def _populate_dependencies(self):
6334        """ Return the list of models which have to be populated before the current one.
6335
6336        :rtype: list
6337        """
6338        return []
6339
6340    def _populate(self, size):
6341        """ Create records to populate this model.
6342
6343        :param str size: symbolic size for the number of records: ``'small'``, ``'medium'`` or ``'large'``
6344        """
6345        batch_size = 1000
6346        min_size = self._populate_sizes[size]
6347
6348        record_count = 0
6349        create_values = []
6350        complete = False
6351        field_generators = self._populate_factories()
6352        if not field_generators:
6353            return self.browse() # maybe create an automatic generator?
6354
6355        records_batches = []
6356        generator = populate.chain_factories(field_generators, self._name)
6357        while record_count <= min_size or not complete:
6358            values = next(generator)
6359            complete = values.pop('__complete')
6360            create_values.append(values)
6361            record_count += 1
6362            if len(create_values) >= batch_size:
6363                _logger.info('Batch: %s/%s', record_count, min_size)
6364                records_batches.append(self.create(create_values))
6365                create_values = []
6366
6367        if create_values:
6368            records_batches.append(self.create(create_values))
6369        return self.concat(*records_batches)
6370
6371
6372collections.Set.register(BaseModel)
6373# not exactly true as BaseModel doesn't have __reversed__, index or count
6374collections.Sequence.register(BaseModel)
6375
6376class RecordCache(MutableMapping):
6377    """ A mapping from field names to values, to read and update the cache of a record. """
6378    __slots__ = ['_record']
6379
6380    def __init__(self, record):
6381        assert len(record) == 1, "Unexpected RecordCache(%s)" % record
6382        self._record = record
6383
6384    def __contains__(self, name):
6385        """ Return whether `record` has a cached value for field ``name``. """
6386        field = self._record._fields[name]
6387        return self._record.env.cache.contains(self._record, field)
6388
6389    def __getitem__(self, name):
6390        """ Return the cached value of field ``name`` for `record`. """
6391        field = self._record._fields[name]
6392        return self._record.env.cache.get(self._record, field)
6393
6394    def __setitem__(self, name, value):
6395        """ Assign the cached value of field ``name`` for ``record``. """
6396        field = self._record._fields[name]
6397        self._record.env.cache.set(self._record, field, value)
6398
6399    def __delitem__(self, name):
6400        """ Remove the cached value of field ``name`` for ``record``. """
6401        field = self._record._fields[name]
6402        self._record.env.cache.remove(self._record, field)
6403
6404    def __iter__(self):
6405        """ Iterate over the field names with a cached value. """
6406        for field in self._record.env.cache.get_fields(self._record):
6407            yield field.name
6408
6409    def __len__(self):
6410        """ Return the number of fields with a cached value. """
6411        return sum(1 for name in self)
6412
6413
6414AbstractModel = BaseModel
6415
6416class Model(AbstractModel):
6417    """ Main super-class for regular database-persisted Odoo models.
6418
6419    Odoo models are created by inheriting from this class::
6420
6421        class user(Model):
6422            ...
6423
6424    The system will later instantiate the class once per database (on
6425    which the class' module is installed).
6426    """
6427    _auto = True                # automatically create database backend
6428    _register = False           # not visible in ORM registry, meant to be python-inherited only
6429    _abstract = False           # not abstract
6430    _transient = False          # not transient
6431
6432class TransientModel(Model):
6433    """ Model super-class for transient records, meant to be temporarily
6434    persistent, and regularly vacuum-cleaned.
6435
6436    A TransientModel has a simplified access rights management, all users can
6437    create new records, and may only access the records they created. The
6438    superuser has unrestricted access to all TransientModel records.
6439    """
6440    _auto = True                # automatically create database backend
6441    _register = False           # not visible in ORM registry, meant to be python-inherited only
6442    _abstract = False           # not abstract
6443    _transient = True           # transient
6444
6445    @api.autovacuum
6446    def _transient_vacuum(self):
6447        """Clean the transient records.
6448
6449        This unlinks old records from the transient model tables whenever the
6450        "_transient_max_count" or "_max_age" conditions (if any) are reached.
6451        Actual cleaning will happen only once every "_transient_check_time" calls.
6452        This means this method can be called frequently called (e.g. whenever
6453        a new record is created).
6454        Example with both max_hours and max_count active:
6455        Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
6456        table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
6457        5 and 10 minutes ago, the rest created/changed more then 12 minutes ago.
6458        - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
6459        - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
6460          would immediately cause the maximum to be reached again.
6461        - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
6462        """
6463        if self._transient_max_hours:
6464            # Age-based expiration
6465            self._transient_clean_rows_older_than(self._transient_max_hours * 60 * 60)
6466
6467        if self._transient_max_count:
6468            # Count-based expiration
6469            self._transient_clean_old_rows(self._transient_max_count)
6470
6471    def _transient_clean_old_rows(self, max_count):
6472        # Check how many rows we have in the table
6473        query = 'SELECT count(*) FROM "{}"'.format(self._table)
6474        self._cr.execute(query)
6475        [count] = self._cr.fetchone()
6476        if count > max_count:
6477            self._transient_clean_rows_older_than(300)
6478
6479    def _transient_clean_rows_older_than(self, seconds):
6480        # Never delete rows used in last 5 minutes
6481        seconds = max(seconds, 300)
6482        query = """
6483            SELECT id FROM "{}"
6484            WHERE COALESCE(write_date, create_date, (now() AT TIME ZONE 'UTC'))::timestamp
6485                < (now() AT TIME ZONE 'UTC') - interval %s
6486        """.format(self._table)
6487        self._cr.execute(query, ["%s seconds" % seconds])
6488        ids = [x[0] for x in self._cr.fetchall()]
6489        self.sudo().browse(ids).unlink()
6490
6491
6492def itemgetter_tuple(items):
6493    """ Fixes itemgetter inconsistency (useful in some cases) of not returning
6494    a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
6495    """
6496    if len(items) == 0:
6497        return lambda a: ()
6498    if len(items) == 1:
6499        return lambda gettable: (gettable[items[0]],)
6500    return operator.itemgetter(*items)
6501
6502def convert_pgerror_not_null(model, fields, info, e):
6503    if e.diag.table_name != model._table:
6504        return {'message': _(u"Missing required value for the field '%s'") % (e.diag.column_name)}
6505
6506    field_name = e.diag.column_name
6507    field = fields[field_name]
6508    message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
6509    return {
6510        'message': message,
6511        'field': field_name,
6512    }
6513
6514def convert_pgerror_unique(model, fields, info, e):
6515    # new cursor since we're probably in an error handler in a blown
6516    # transaction which may not have been rollbacked/cleaned yet
6517    with closing(model.env.registry.cursor()) as cr_tmp:
6518        cr_tmp.execute("""
6519            SELECT
6520                conname AS "constraint name",
6521                t.relname AS "table name",
6522                ARRAY(
6523                    SELECT attname FROM pg_attribute
6524                    WHERE attrelid = conrelid
6525                      AND attnum = ANY(conkey)
6526                ) as "columns"
6527            FROM pg_constraint
6528            JOIN pg_class t ON t.oid = conrelid
6529            WHERE conname = %s
6530        """, [e.diag.constraint_name])
6531        constraint, table, ufields = cr_tmp.fetchone() or (None, None, None)
6532    # if the unique constraint is on an expression or on an other table
6533    if not ufields or model._table != table:
6534        return {'message': tools.ustr(e)}
6535
6536    # TODO: add stuff from e.diag.message_hint? provides details about the constraint & duplication values but may be localized...
6537    if len(ufields) == 1:
6538        field_name = ufields[0]
6539        field = fields[field_name]
6540        message = _(u"The value for the field '%s' already exists (this is probably '%s' in the current model).") % (field_name, field['string'])
6541        return {
6542            'message': message,
6543            'field': field_name,
6544        }
6545    field_strings = [fields[fname]['string'] for fname in ufields]
6546    message = _(u"The values for the fields '%s' already exist (they are probably '%s' in the current model).") % (', '.join(ufields), ', '.join(field_strings))
6547    return {
6548        'message': message,
6549        # no field, unclear which one we should pick and they could be in any order
6550    }
6551
6552def convert_pgerror_constraint(model, fields, info, e):
6553    sql_constraints = dict([(('%s_%s') % (e.diag.table_name, x[0]), x) for x in model._sql_constraints])
6554    if e.diag.constraint_name in sql_constraints.keys():
6555        return {'message': "'%s'" % sql_constraints[e.diag.constraint_name][2]}
6556    return {'message': tools.ustr(e)}
6557
6558PGERROR_TO_OE = defaultdict(
6559    # shape of mapped converters
6560    lambda: (lambda model, fvg, info, pgerror: {'message': tools.ustr(pgerror)}), {
6561    '23502': convert_pgerror_not_null,
6562    '23505': convert_pgerror_unique,
6563    '23514': convert_pgerror_constraint,
6564})
6565
6566
6567def lazy_name_get(self):
6568    """ Evaluate self.name_get() lazily. """
6569    names = tools.lazy(lambda: dict(self.name_get()))
6570    return [(rid, tools.lazy(operator.getitem, names, rid)) for rid in self.ids]
6571
6572
6573# keep those imports here to avoid dependency cycle errors
6574from .osv import expression
6575from .fields import Field, Datetime
6576