1"""
2Functions inferring the syntax tree.
3"""
4import copy
5
6from parso.python import tree
7
8from jedi import debug
9from jedi import parser_utils
10from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \
11    iterator_to_value_set, iterate_values
12from jedi.inference.lazy_value import LazyTreeValue
13from jedi.inference import compiled
14from jedi.inference import recursion
15from jedi.inference import analysis
16from jedi.inference import imports
17from jedi.inference import arguments
18from jedi.inference.value import ClassValue, FunctionValue
19from jedi.inference.value import iterable
20from jedi.inference.value.dynamic_arrays import ListModification, DictModification
21from jedi.inference.value import TreeInstance
22from jedi.inference.helpers import is_string, is_literal, is_number, \
23    get_names_of_node, is_big_annoying_library
24from jedi.inference.compiled.access import COMPARISON_OPERATORS
25from jedi.inference.cache import inference_state_method_cache
26from jedi.inference.gradual.stub_value import VersionInfo
27from jedi.inference.gradual import annotation
28from jedi.inference.names import TreeNameDefinition
29from jedi.inference.context import CompForContext
30from jedi.inference.value.decorator import Decoratee
31from jedi.plugins import plugin_manager
32
33operator_to_magic_method = {
34    '+': '__add__',
35    '-': '__sub__',
36    '*': '__mul__',
37    '@': '__matmul__',
38    '/': '__truediv__',
39    '//': '__floordiv__',
40    '%': '__mod__',
41    '**': '__pow__',
42    '<<': '__lshift__',
43    '>>': '__rshift__',
44    '&': '__and__',
45    '|': '__or__',
46    '^': '__xor__',
47}
48
49reverse_operator_to_magic_method = {
50    k: '__r' + v[2:] for k, v in operator_to_magic_method.items()
51}
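# For example, ``a + b`` is inferred by looking up ``a.__add__`` and, if that
# yields nothing, by falling back to the reverse method ``b.__radd__`` (see
# ``_infer_comparison_part`` below).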


def _limit_value_infers(func):
    """
    For now, this is how we limit type inference from going wild. There are
    other ways to ensure recursion limits as well. This is mostly necessary
    because of instance (self) access that can be quite tricky to limit.

    I'm still not sure this is the way to go, but it looks okay for now and we
    can still go another way in the future. Tests are there. ~ dave
    """
    def wrapper(context, *args, **kwargs):
        n = context.tree_node
        inference_state = context.inference_state
        try:
            inference_state.inferred_element_counts[n] += 1
            maximum = 300
            if context.parent_context is None \
                    and context.get_value() is inference_state.builtins_module:
                # Builtins should have a more generous inference limit.
                # It is important that builtins can be executed, otherwise some
                # functions that depend on certain builtins features would be
                # broken, see e.g. GH #1432
                maximum *= 100

            if inference_state.inferred_element_counts[n] > maximum:
                debug.warning('In value %s there were too many inferences.', n)
                return NO_VALUES
        except KeyError:
            inference_state.inferred_element_counts[n] = 1
        return func(context, *args, **kwargs)

    return wrapper


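# Entry point for inferring an arbitrary parso node within a context. For
# nodes inside an ``if`` suite (in analysis mode), names from the ``if`` test
# that also occur in the node may be inferred once per possible definition and
# the results combined, instead of using the union of all definitions at once.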
def infer_node(context, element):
    if isinstance(context, CompForContext):
        return _infer_node(context, element)

    if_stmt = element
    while if_stmt is not None:
        if_stmt = if_stmt.parent
        if if_stmt.type in ('if_stmt', 'for_stmt'):
            break
        if parser_utils.is_scope(if_stmt):
            if_stmt = None
            break
    predefined_if_name_dict = context.predefined_names.get(if_stmt)
    # TODO there are a lot of issues with this one. We actually should do
    # this in a different way. Caching should only be active in certain
    # cases and this all sucks.
    if predefined_if_name_dict is None and if_stmt \
            and if_stmt.type == 'if_stmt' and context.inference_state.is_analysis:
        if_stmt_test = if_stmt.children[1]
        name_dicts = [{}]
        # If we already did a check, we don't want to do it again -> If
        # value.predefined_names is filled, we stop.
        # We don't want to check the if stmt itself, it's just about
        # the content.
        if element.start_pos > if_stmt_test.end_pos:
            # Now we need to check if the names in the if_stmt match the
            # names in the suite.
            if_names = get_names_of_node(if_stmt_test)
            element_names = get_names_of_node(element)
            str_element_names = [e.value for e in element_names]
            if any(i.value in str_element_names for i in if_names):
                for if_name in if_names:
                    definitions = context.inference_state.infer(context, if_name)
                    # Every name that has multiple different definitions
                    # causes the complexity to rise. The complexity should
                    # never fall below 1.
                    if len(definitions) > 1:
                        if len(name_dicts) * len(definitions) > 16:
                            debug.dbg('Too many options for if branch inference %s.', if_stmt)
                            # There's only a certain number of branches
                            # Jedi can infer, otherwise it would take too
                            # long.
                            name_dicts = [{}]
                            break

                        original_name_dicts = list(name_dicts)
                        name_dicts = []
                        for definition in definitions:
                            new_name_dicts = list(original_name_dicts)
                            for i, name_dict in enumerate(new_name_dicts):
                                new_name_dicts[i] = name_dict.copy()
                                new_name_dicts[i][if_name.value] = ValueSet([definition])

                            name_dicts += new_name_dicts
                    else:
                        for name_dict in name_dicts:
                            name_dict[if_name.value] = definitions
        if len(name_dicts) > 1:
            result = NO_VALUES
            for name_dict in name_dicts:
                with context.predefine_names(if_stmt, name_dict):
                    result |= _infer_node(context, element)
            return result
        else:
            return _infer_node_if_inferred(context, element)
    else:
        if predefined_if_name_dict:
            return _infer_node(context, element)
        else:
            return _infer_node_if_inferred(context, element)


def _infer_node_if_inferred(context, element):
    """
    TODO This function is temporary: Merge with infer_node.
    """
    parent = element
    while parent is not None:
        parent = parent.parent
        predefined_if_name_dict = context.predefined_names.get(parent)
        if predefined_if_name_dict is not None:
            return _infer_node(context, element)
    return _infer_node_cached(context, element)


@inference_state_method_cache(default=NO_VALUES)
def _infer_node_cached(context, element):
    return _infer_node(context, element)


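# Dispatches on the parso node type, e.g. ``atom_expr`` for ``foo.bar()``,
# ``lambdef`` for lambdas or ``test`` for ternary expressions.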
@debug.increase_indent
@_limit_value_infers
def _infer_node(context, element):
    debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
    inference_state = context.inference_state
    typ = element.type
    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
        return infer_atom(context, element)
    elif typ == 'lambdef':
        return ValueSet([FunctionValue.from_context(context, element)])
    elif typ == 'expr_stmt':
        return infer_expr_stmt(context, element)
    elif typ in ('power', 'atom_expr'):
        first_child = element.children[0]
        children = element.children[1:]
        had_await = False
        if first_child.type == 'keyword' and first_child.value == 'await':
            had_await = True
            first_child = children.pop(0)

        value_set = context.infer_node(first_child)
        for (i, trailer) in enumerate(children):
            if trailer == '**':  # has a power operation.
                right = context.infer_node(children[i + 1])
                value_set = _infer_comparison(
                    context,
                    value_set,
                    trailer,
                    right
                )
                break
            value_set = infer_trailer(context, value_set, trailer)

        if had_await:
            return value_set.py__await__().py__stop_iteration_returns()
        return value_set
    elif typ in ('testlist_star_expr', 'testlist',):
        # The implicit tuple in statements.
        return ValueSet([iterable.SequenceLiteralValue(inference_state, context, element)])
    elif typ in ('not_test', 'factor'):
        value_set = context.infer_node(element.children[-1])
        for operator in element.children[:-1]:
            value_set = infer_factor(value_set, operator)
        return value_set
    elif typ == 'test':
        # `x if foo else y` case.
        return (context.infer_node(element.children[0])
                | context.infer_node(element.children[-1]))
    elif typ == 'operator':
        # Must be an ellipsis, other operators are not inferred.
        if element.value != '...':
            origin = element.parent
            raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
        return ValueSet([compiled.builtin_from_name(inference_state, 'Ellipsis')])
    elif typ == 'dotted_name':
        value_set = infer_atom(context, element.children[0])
        for next_name in element.children[2::2]:
            value_set = value_set.py__getattribute__(next_name, name_context=context)
        return value_set
    elif typ == 'eval_input':
        return context.infer_node(element.children[0])
    elif typ == 'annassign':
        return annotation.infer_annotation(context, element.children[1]) \
            .execute_annotation()
    elif typ == 'yield_expr':
        if len(element.children) and element.children[1].type == 'yield_arg':
            # Implies that it's a yield from.
            element = element.children[1].children[1]
            generators = context.infer_node(element) \
                .py__getattribute__('__iter__').execute_with_values()
            return generators.py__stop_iteration_returns()

        # Generator.send() is not implemented.
        return NO_VALUES
    elif typ == 'namedexpr_test':
        return context.infer_node(element.children[2])
    else:
        return infer_or_test(context, element)


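# A trailer is one of ``.name``, ``(arguments)`` or ``[subscript]``; for
# ``a.b(c)[d]`` the trailers ``.b``, ``(c)`` and ``[d]`` are applied in order.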
def infer_trailer(context, atom_values, trailer):
    trailer_op, node = trailer.children[:2]
    if node == ')':  # `arglist` is optional.
        node = None

    if trailer_op == '[':
        trailer_op, node, _ = trailer.children
        return atom_values.get_item(
            _infer_subscript_list(context, node),
            ContextualizedNode(context, trailer)
        )
    else:
        debug.dbg('infer_trailer: %s in %s', trailer, atom_values)
        if trailer_op == '.':
            return atom_values.py__getattribute__(
                name_context=context,
                name_or_str=node
            )
        else:
            assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
            args = arguments.TreeArguments(context.inference_state, context, node, trailer)
            return atom_values.execute(args)


def infer_atom(context, atom):
    """
    Basically to process ``atom`` nodes. The parser sometimes doesn't
    generate the node (because it has just one child). In that case an atom
    might be a name or a literal as well.
    """
    state = context.inference_state
    if atom.type == 'name':
        # This is the first global lookup.
        stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef', 'if_stmt') or atom
        if stmt.type == 'if_stmt':
            if not any(n.start_pos <= atom.start_pos < n.end_pos for n in stmt.get_test_nodes()):
                stmt = atom
        elif stmt.type == 'lambdef':
            stmt = atom
        position = stmt.start_pos
        if _is_annotation_name(atom):
            # Since Python 3.7 (with from __future__ import annotations),
            # annotations are essentially strings and can reference objects
            # that are defined further down in code. Therefore just set the
            # position to None, so the finder will not try to stop at a certain
            # position in the module.
            position = None
        return context.py__getattribute__(atom, position=position)
    elif atom.type == 'keyword':
        # For False/True/None
        if atom.value in ('False', 'True', 'None'):
            return ValueSet([compiled.builtin_from_name(state, atom.value)])
        elif atom.value == 'yield':
            # Contrary to yield from, yield can just appear alone to return a
            # value when used with `.send()`.
            return NO_VALUES
        assert False, 'Cannot infer the keyword %s' % atom

    elif isinstance(atom, tree.Literal):
        string = state.compiled_subprocess.safe_literal_eval(atom.value)
        return ValueSet([compiled.create_simple_object(state, string)])
    elif atom.type == 'strings':
        # Will be multiple strings.
        value_set = infer_atom(context, atom.children[0])
        for string in atom.children[1:]:
            right = infer_atom(context, string)
            value_set = _infer_comparison(context, value_set, '+', right)
        return value_set
    elif atom.type == 'fstring':
        return compiled.get_string_value_set(state)
    else:
        c = atom.children
        # Parentheses without commas are not tuples.
        if c[0] == '(' and not len(c) == 2 \
                and not (c[1].type == 'testlist_comp'
                         and len(c[1].children) > 1):
            return context.infer_node(c[1])

        try:
            comp_for = c[1].children[1]
        except (IndexError, AttributeError):
            pass
        else:
            if comp_for == ':':
                # Dict comprehensions have a colon at the 3rd index.
                try:
                    comp_for = c[1].children[3]
                except IndexError:
                    pass

            if comp_for.type in ('comp_for', 'sync_comp_for'):
                return ValueSet([iterable.comprehension_from_atom(
                    state, context, atom
                )])

        # It's a dict/list/tuple literal.
        array_node = c[1]
        try:
            array_node_c = array_node.children
        except AttributeError:
            array_node_c = []
        if c[0] == '{' and (array_node == '}' or ':' in array_node_c
                            or '**' in array_node_c):
            new_value = iterable.DictLiteralValue(state, context, atom)
        else:
            new_value = iterable.SequenceLiteralValue(state, context, atom)
        return ValueSet([new_value])


@_limit_value_infers
def infer_expr_stmt(context, stmt, seek_name=None):
    with recursion.execution_allowed(context.inference_state, stmt) as allowed:
        if allowed:
            if seek_name is not None:
                pep0484_values = \
                    annotation.find_type_from_comment_hint_assign(context, stmt, seek_name)
                if pep0484_values:
                    return pep0484_values

            return _infer_expr_stmt(context, stmt, seek_name)
    return NO_VALUES


@debug.increase_indent
def _infer_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, which are the calls that the statement makes. If multiple names
    are defined in the statement, `seek_name` selects the result for that
    name.

    expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
    annassign: ':' test ['=' test]
    augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
                '<<=' | '>>=' | '**=' | '//=')

    :param stmt: A `tree.ExprStmt`.
    """
    def check_setitem(stmt):
        atom_expr = stmt.children[0]
        if atom_expr.type not in ('atom_expr', 'power'):
            return False, None
        name = atom_expr.children[0]
        if name.type != 'name' or len(atom_expr.children) != 2:
            return False, None
        trailer = atom_expr.children[-1]
        return trailer.children[0] == '[', trailer.children[1]

    debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()

    value_set = context.infer_node(rhs)

    if seek_name:
        n = TreeNameDefinition(context, seek_name)
        value_set = check_tuple_assignments(n, value_set)

    first_operator = next(stmt.yield_operators(), None)
    is_setitem, subscriptlist = check_setitem(stmt)
    is_aug_assignment = first_operator not in ('=', None) and first_operator.type == 'operator'
    if is_aug_assignment or is_setitem:
        # `=` is always the last character in aug assignments -> -1
        name = stmt.get_defined_names(include_setitem=True)[0].value
        left_values = context.py__getattribute__(name, position=stmt.start_pos)

        if is_setitem:
            def to_mod(v):
                c = ContextualizedSubscriptListNode(context, subscriptlist)
                if v.array_type == 'dict':
                    return DictModification(v, value_set, c)
                elif v.array_type == 'list':
                    return ListModification(v, value_set, c)
                return v

            value_set = ValueSet(to_mod(v) for v in left_values)
        else:
            operator = copy.copy(first_operator)
            operator.value = operator.value[:-1]
            for_stmt = tree.search_ancestor(stmt, 'for_stmt')
            if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):
                # Iterate through the result and add the values; that is only
                # possible in for loops without clutter, because they are
                # predictable. Also only do it if the variable is not a tuple.
                node = for_stmt.get_testlist()
                cn = ContextualizedNode(context, node)
                ordered = list(cn.infer().iterate(cn))

                for lazy_value in ordered:
                    dct = {for_stmt.children[1].value: lazy_value.infer()}
                    with context.predefine_names(for_stmt, dct):
                        t = context.infer_node(rhs)
                        left_values = _infer_comparison(context, left_values, operator, t)
                value_set = left_values
            else:
                value_set = _infer_comparison(context, left_values, operator, value_set)
    debug.dbg('infer_expr_stmt result %s', value_set)
    return value_set


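# Handles boolean and comparison chains like ``a or b`` or ``x < y``. For
# ``and``/``or`` the right operand is only inferred when the truthiness of the
# left operand makes it the result, e.g. ``False or x`` infers ``x``.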
def infer_or_test(context, or_test):
    iterator = iter(or_test.children)
    types = context.infer_node(next(iterator))
    for operator in iterator:
        right = next(iterator)
        if operator.type == 'comp_op':  # not in / is not
            operator = ' '.join(c.value for c in operator.children)

        # handle type inference of and/or here.
        if operator in ('and', 'or'):
            left_bools = set(left.py__bool__() for left in types)
            if left_bools == {True}:
                if operator == 'and':
                    types = context.infer_node(right)
            elif left_bools == {False}:
                if operator != 'and':
                    types = context.infer_node(right)
            # Otherwise continue, because of uncertainty.
        else:
            types = _infer_comparison(context, types, operator,
                                      context.infer_node(right))
    debug.dbg('infer_or_test types %s', types)
    return types


@iterator_to_value_set
def infer_factor(value_set, operator):
    """
    Calculates `+`, `-`, `~` and `not` prefixes.
    """
    for value in value_set:
        if operator == '-':
            if is_number(value):
                yield value.negate()
        elif operator == 'not':
            b = value.py__bool__()
            if b is None:  # Uncertainty.
                return
            yield compiled.create_simple_object(value.inference_state, not b)
        else:
            yield value


def _literals_to_types(inference_state, result):
    # Changes literals ('a', 1, 1.0, etc) to their type instances (str(),
    # int(), float(), etc).
    new_result = NO_VALUES
    for typ in result:
        if is_literal(typ):
            # Literals are only valid as long as the operations are
            # correct. Otherwise add a value-free instance.
            cls = compiled.builtin_from_name(inference_state, typ.name.string_name)
            new_result |= cls.execute_with_values()
        else:
            new_result |= ValueSet([typ])
    return new_result


def _infer_comparison(context, left_values, operator, right_values):
    state = context.inference_state
    if not left_values or not right_values:
        # Illegal slices, for example, cause left/right_result to be None.
        result = (left_values or NO_VALUES) | (right_values or NO_VALUES)
        return _literals_to_types(state, result)
    else:
        # I don't think there's a reasonable chance that a string
        # operation is still correct once we pass something like six
        # objects.
        if len(left_values) * len(right_values) > 6:
            return _literals_to_types(state, left_values | right_values)
        else:
            return ValueSet.from_sets(
                _infer_comparison_part(state, context, left, operator, right)
                for left in left_values
                for right in right_values
            )


def _is_annotation_name(name):
    ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt')
    if ancestor is None:
        return False

    if ancestor.type in ('param', 'funcdef'):
        ann = ancestor.annotation
        if ann is not None:
            return ann.start_pos <= name.start_pos < ann.end_pos
    elif ancestor.type == 'expr_stmt':
        c = ancestor.children
        if len(c) > 1 and c[1].type == 'annassign':
            return c[1].start_pos <= name.start_pos < c[1].end_pos
    return False


def _is_list(value):
    return value.array_type == 'list'


def _is_tuple(value):
    return value.array_type == 'tuple'


def _bool_to_value(inference_state, bool_):
    return compiled.builtin_from_name(inference_state, str(bool_))


def _get_tuple_ints(value):
    if not isinstance(value, iterable.SequenceLiteralValue):
        return None
    numbers = []
    for lazy_value in value.py__iter__():
        if not isinstance(lazy_value, LazyTreeValue):
            return None
        node = lazy_value.data
        if node.type != 'number':
            return None
        try:
            numbers.append(int(node.value))
        except ValueError:
            return None
    return numbers


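# Infers a single ``left <operator> right`` pair: ``1 + 2`` is folded via
# ``execute_operation``, ``[1] + [2]`` becomes a merged array, undecidable
# comparisons return both ``True`` and ``False``, and everything else falls
# back to the ``__add__``/``__radd__`` style magic methods from the tables at
# the top of this module.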
def _infer_comparison_part(inference_state, context, left, operator, right):
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, str):
        str_operator = operator
    else:
        str_operator = str(operator.value)

    if str_operator == '*':
        # for iterables, ignore * operations
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return left.execute_operation(right, str_operator)
        elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right):
            return ValueSet([iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return left.execute_operation(right, str_operator)
    elif str_operator == '%':
        # With strings and numbers the left type typically remains. Except for
        # `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            result = left.execute_operation(right, str_operator)
            if result:
                return result
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                # We can only continue if == returns True or != returns False.
                # There's no guarantee that they are not equal. This can help
                # in some cases, but does not cover everything.
                if (str_operator in ('is', '==')) == bool_:
                    return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    bool_result = compiled.access.COMPARISON_OPERATORS[operator](
                        inference_state.environment.version_info,
                        tuple(version_info)
                    )
                    return ValueSet([_bool_to_value(inference_state, bool_result)])

        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator in ('in', 'not in'):
        return NO_VALUES

    def check(obj):
        """Checks if a Jedi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis, one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        message = "TypeError: unsupported operand type(s) for +: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (left, right))

    if left.is_class() or right.is_class():
        return NO_VALUES

    method_name = operator_to_magic_method[str_operator]
    magic_methods = left.py__getattribute__(method_name)
    if magic_methods:
        result = magic_methods.execute_with_values(right)
        if result:
            return result

    if not magic_methods:
        reverse_method_name = reverse_operator_to_magic_method[str_operator]
        magic_methods = right.py__getattribute__(reverse_method_name)

        result = magic_methods.execute_with_values(left)
        if result:
            return result

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result


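# Infers the values a name refers to from its defining node, e.g. the iterated
# values for a ``for`` target, the ``__enter__()`` result for a ``with``
# target or the decorated value for ``def``/``class`` names.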
@plugin_manager.decorate()
def tree_name_to_values(inference_state, context, tree_name):
    value_set = NO_VALUES
    module_node = context.get_root_context().tree_node
    # First check for annotations, like: `foo: int = 3`
    if module_node is not None:
        names = module_node.get_used_names().get(tree_name.value, [])
        found_annotation = False
        for name in names:
            expr_stmt = name.parent

            if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
                correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
                if correct_scope:
                    found_annotation = True
                    value_set |= annotation.infer_annotation(
                        context, expr_stmt.children[1].children[1]
                    ).execute_annotation()
        if found_annotation:
            return value_set

    types = []
    node = tree_name.get_definition(import_name_always=True, include_setitem=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            c = context.create_context(tree_name)
            if c.is_module():
                # In case we are already part of the module, there is no point
                # in looking up the global statement anymore, because it's not
                # valid at that point anyway.
                return NO_VALUES
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filter = next(c.get_filters())
            names = filter.get(tree_name.value)
            return ValueSet.from_sets(name.infer() for name in names)
        elif node.type not in ('import_from', 'import_name'):
            c = context.create_context(tree_name)
            return infer_atom(c, tree_name)

    typ = node.type
    if typ == 'for_stmt':
        types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types

    if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterate_values(
                cn.infer(),
                contextualized_node=cn,
                is_async=node.parent.type == 'async_stmt',
            )
            n = TreeNameDefinition(context, tree_name)
            types = check_tuple_assignments(n, for_types)
    elif typ == 'expr_stmt':
        types = infer_expr_stmt(context, node, tree_name)
    elif typ == 'with_stmt':
        value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
        enter_methods = value_managers.py__getattribute__('__enter__')
        return enter_methods.execute_with_values()
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling())
        types = exceptions.execute_with_values()
    elif typ == 'param':
        types = NO_VALUES
    elif typ == 'del_stmt':
        types = NO_VALUES
    elif typ == 'namedexpr_test':
        types = infer_node(context, node)
    else:
        raise ValueError("Should not happen. type: %s" % typ)
    return types


# We don't want to have functions/classes that are created by the same
# tree_node.
@inference_state_method_cache()
def _apply_decorators(context, node):
    """
    Returns the function that should be executed in the end.
    This is also the place where the decorators are processed.
    """
    if node.type == 'classdef':
        decoratee_value = ClassValue(
            context.inference_state,
            parent_context=context,
            tree_node=node
        )
    else:
        decoratee_value = FunctionValue.from_context(context, node)
    initial = values = ValueSet([decoratee_value])

    if is_big_annoying_library(context):
        return values

    for dec in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', dec, values, color="MAGENTA")
        with debug.increase_indent_cm():
            dec_values = context.infer_node(dec.children[1])
            trailer_nodes = dec.children[2:-1]
            if trailer_nodes:
                # Create a trailer and infer it.
                trailer = tree.PythonNode('trailer', trailer_nodes)
                trailer.parent = dec
                dec_values = infer_trailer(context, dec_values, trailer)

            if not len(dec_values):
                code = dec.get_code(include_prefix=False)
                # For the near future, we don't want to hear about the runtime
                # decorator in typing that was intentionally omitted. This is not
                # "correct", but helps with debugging.
                if code != '@runtime\n':
                    debug.warning('decorator not found: %s on %s', dec, node)
                return initial

            values = dec_values.execute(arguments.ValuesArguments([values]))
            if not len(values):
                debug.warning('not possible to resolve wrappers found %s', node)
                return initial

        debug.dbg('decorator end %s', values, color="MAGENTA")
    if values != initial:
        return ValueSet([Decoratee(c, decoratee_value) for c in values])
    return values


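# For unpacking assignments like ``a, b = f()`` this picks the iterated value
# at the index of ``name``; star targets (``a, *b = ...``) are not supported
# and yield no values.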
def check_tuple_assignments(name, value_set):
    """
    Checks if tuples are assigned.
    """
    lazy_value = None
    for index, node in name.assignment_indexes():
        cn = ContextualizedNode(name.parent_context, node)
        iterated = value_set.iterate(cn)
        if isinstance(index, slice):
            # For now star unpacking is not possible.
            return NO_VALUES
        i = 0
        while i <= index:
            try:
                lazy_value = next(iterated)
            except StopIteration:
                # We could do this with the default param in next. But this
                # would allow this loop to run for a very long time if the
                # index number is high. Therefore return as soon as the
                # iterator is exhausted.
                return NO_VALUES
            else:
                i += lazy_value.max
        value_set = lazy_value.infer()
    return value_set


class ContextualizedSubscriptListNode(ContextualizedNode):
    def infer(self):
        return _infer_subscript_list(self.context, self.node)


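# ``x[:]`` and ``x[1:2]`` produce ``Slice`` values, ``x[a, b]`` is a
# ``subscriptlist`` and becomes a sequence literal, and anything else is
# inferred directly.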
def _infer_subscript_list(context, index):
    """
    Handles slices in subscript nodes.
    """
    if index == ':':
        # Like array[:]
        return ValueSet([iterable.Slice(context, None, None, None)])

    elif index.type == 'subscript' and not index.children[0] == '.':
        # subscript basically implies a slice operation
        # e.g. array[:3]
        result = []
        for el in index.children:
            if el == ':':
                if not result:
                    result.append(None)
            elif el.type == 'sliceop':
                if len(el.children) == 2:
                    result.append(el.children[1])
            else:
                result.append(el)
        result += [None] * (3 - len(result))

        return ValueSet([iterable.Slice(context, *result)])
    elif index.type == 'subscriptlist':
        return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)])

    # No slices
    return context.infer_node(index)