From 601546884d8e23bb941eb955b697b02f2426e675 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Mon, 12 Feb 2018 16:47:38 -0800 Subject: [PATCH] Update Jedi to 0.11.1 (#762) * Basic tokenizer * Fixed property names * Tests, round I * Tests, round II * tokenizer test * Remove temorary change * Fix merge issue * Merge conflict * Merge conflict * Completion test * Fix last line * Fix javascript math * Make test await for results * Add license headers * Rename definitions to types * License headers * Fix typo in completion details (typo) * Fix hover test * Russian translations * Update to better translation * Fix typo * #70 How to get all parameter info when filling in a function param list * Fix #70 How to get all parameter info when filling in a function param list * Clean up * Clean imports * CR feedback * Trim whitespace for test stability * More tests * Better handle no-parameters documentation * Better handle ellipsis and Python3 * #385 Auto-Indentation doesn't work after comment * #141 Auto indentation broken when return keyword involved * Undo changes * #627 Docstrings for builtin methods are not parsed correctly * reStructuredText converter * Fix: period is not an operator * Minor fixes * Restructure * Tests * Tests * Code heuristics * Baselines * HTML handling * Lists * State machine * Baselines * Squash * no message * Whitespace difference * Update Jedi to 0.11.1 * Enable Travis * Test fixes * Undo change --- pythonFiles/release/jedi/__init__.py | 6 +- pythonFiles/release/jedi/__main__.py | 19 +- pythonFiles/release/jedi/_compatibility.py | 146 ++- pythonFiles/release/jedi/api/__init__.py | 698 +++++--------- pythonFiles/release/jedi/api/classes.py | 591 ++++++------ pythonFiles/release/jedi/api/completion.py | 291 ++++++ pythonFiles/release/jedi/api/helpers.py | 360 ++++++-- pythonFiles/release/jedi/api/interpreter.py | 126 +-- pythonFiles/release/jedi/api/keywords.py | 110 ++- pythonFiles/release/jedi/api/usages.py | 49 - pythonFiles/release/jedi/cache.py | 283 +----- pythonFiles/release/jedi/common/__init__.py | 1 + pythonFiles/release/jedi/common/context.py | 67 ++ pythonFiles/release/jedi/debug.py | 89 +- pythonFiles/release/jedi/evaluate/__init__.py | 540 ++++++----- pythonFiles/release/jedi/evaluate/analysis.py | 236 ++--- .../release/jedi/evaluate/arguments.py | 245 +++++ .../release/jedi/evaluate/base_context.py | 260 ++++++ pythonFiles/release/jedi/evaluate/cache.py | 45 +- .../jedi/evaluate/compiled/__init__.py | 625 +++++++------ .../release/jedi/evaluate/compiled/fake.py | 182 +++- .../jedi/evaluate/compiled/fake/_weakref.pym | 3 +- .../jedi/evaluate/compiled/fake/builtins.pym | 32 +- .../jedi/evaluate/compiled/fake/io.pym | 6 + .../jedi/evaluate/compiled/fake/operator.pym | 33 + .../jedi/evaluate/compiled/getattr_static.py | 175 ++++ .../release/jedi/evaluate/compiled/mixed.py | 231 +++++ .../release/jedi/evaluate/context/__init__.py | 5 + .../release/jedi/evaluate/context/function.py | 226 +++++ .../release/jedi/evaluate/context/instance.py | 435 +++++++++ .../release/jedi/evaluate/context/iterable.py | 691 ++++++++++++++ .../release/jedi/evaluate/context/klass.py | 197 ++++ .../release/jedi/evaluate/context/module.py | 213 +++++ .../jedi/evaluate/context/namespace.py | 74 ++ .../release/jedi/evaluate/docstrings.py | 192 +++- pythonFiles/release/jedi/evaluate/dynamic.py | 249 +++-- pythonFiles/release/jedi/evaluate/filters.py | 434 +++++++++ pythonFiles/release/jedi/evaluate/finder.py | 641 ++++--------- .../release/jedi/evaluate/flow_analysis.py | 94 +- 
pythonFiles/release/jedi/evaluate/helpers.py | 300 +++--- pythonFiles/release/jedi/evaluate/imports.py | 485 +++++----- pythonFiles/release/jedi/evaluate/iterable.py | 631 ------------- .../release/jedi/evaluate/jedi_typing.py | 100 ++ .../release/jedi/evaluate/lazy_context.py | 61 ++ pythonFiles/release/jedi/evaluate/param.py | 480 +++------- .../release/jedi/evaluate/parser_cache.py | 6 + pythonFiles/release/jedi/evaluate/pep0484.py | 222 +++++ .../release/jedi/evaluate/precedence.py | 174 ---- pythonFiles/release/jedi/evaluate/project.py | 40 + .../release/jedi/evaluate/recursion.py | 232 +++-- .../release/jedi/evaluate/representation.py | 857 ------------------ pythonFiles/release/jedi/evaluate/site.py | 110 +++ pythonFiles/release/jedi/evaluate/stdlib.py | 243 +++-- .../release/jedi/evaluate/syntax_tree.py | 588 ++++++++++++ pythonFiles/release/jedi/evaluate/sys_path.py | 263 +++--- pythonFiles/release/jedi/evaluate/usages.py | 62 ++ .../jedi/{common.py => evaluate/utils.py} | 82 +- pythonFiles/release/jedi/parser_utils.py | 241 +++++ pythonFiles/release/jedi/refactoring.py | 76 +- pythonFiles/release/jedi/settings.py | 78 +- pythonFiles/release/jedi/utils.py | 34 +- src/test/.vscode/settings.json | 6 +- .../extension.refactor.extract.method.test.ts | 139 +-- .../extension.refactor.extract.var.test.ts | 126 +-- 64 files changed, 8738 insertions(+), 5798 deletions(-) create mode 100644 pythonFiles/release/jedi/api/completion.py delete mode 100755 pythonFiles/release/jedi/api/usages.py create mode 100644 pythonFiles/release/jedi/common/__init__.py create mode 100644 pythonFiles/release/jedi/common/context.py create mode 100644 pythonFiles/release/jedi/evaluate/arguments.py create mode 100644 pythonFiles/release/jedi/evaluate/base_context.py create mode 100644 pythonFiles/release/jedi/evaluate/compiled/fake/operator.pym create mode 100644 pythonFiles/release/jedi/evaluate/compiled/getattr_static.py create mode 100644 pythonFiles/release/jedi/evaluate/compiled/mixed.py create mode 100644 pythonFiles/release/jedi/evaluate/context/__init__.py create mode 100644 pythonFiles/release/jedi/evaluate/context/function.py create mode 100644 pythonFiles/release/jedi/evaluate/context/instance.py create mode 100644 pythonFiles/release/jedi/evaluate/context/iterable.py create mode 100644 pythonFiles/release/jedi/evaluate/context/klass.py create mode 100644 pythonFiles/release/jedi/evaluate/context/module.py create mode 100644 pythonFiles/release/jedi/evaluate/context/namespace.py create mode 100644 pythonFiles/release/jedi/evaluate/filters.py delete mode 100755 pythonFiles/release/jedi/evaluate/iterable.py create mode 100644 pythonFiles/release/jedi/evaluate/jedi_typing.py create mode 100644 pythonFiles/release/jedi/evaluate/lazy_context.py create mode 100644 pythonFiles/release/jedi/evaluate/parser_cache.py create mode 100644 pythonFiles/release/jedi/evaluate/pep0484.py delete mode 100755 pythonFiles/release/jedi/evaluate/precedence.py create mode 100644 pythonFiles/release/jedi/evaluate/project.py delete mode 100755 pythonFiles/release/jedi/evaluate/representation.py create mode 100644 pythonFiles/release/jedi/evaluate/site.py create mode 100644 pythonFiles/release/jedi/evaluate/syntax_tree.py create mode 100644 pythonFiles/release/jedi/evaluate/usages.py rename pythonFiles/release/jedi/{common.py => evaluate/utils.py} (62%) mode change 100755 => 100644 create mode 100644 pythonFiles/release/jedi/parser_utils.py diff --git a/pythonFiles/release/jedi/__init__.py b/pythonFiles/release/jedi/__init__.py 
index ca99329cda9c..1a1080ad2fd4 100755 --- a/pythonFiles/release/jedi/__init__.py +++ b/pythonFiles/release/jedi/__init__.py @@ -36,8 +36,8 @@ good text editor, while still having very good IDE features for Python. """ -__version__ = '0.9.0' +__version__ = '0.11.1' -from jedi.api import Script, Interpreter, NotFoundError, set_debug_function -from jedi.api import preload_module, defined_names, names +from jedi.api import Script, Interpreter, set_debug_function, \ + preload_module, names from jedi import settings diff --git a/pythonFiles/release/jedi/__main__.py b/pythonFiles/release/jedi/__main__.py index b26397138312..f2ee0477695b 100755 --- a/pythonFiles/release/jedi/__main__.py +++ b/pythonFiles/release/jedi/__main__.py @@ -1,18 +1,13 @@ -from sys import argv +import sys from os.path import join, dirname, abspath, isdir -if len(argv) == 2 and argv[1] == 'repl': - # don't want to use __main__ only for repl yet, maybe we want to use it for - # something else. So just use the keyword ``repl`` for now. - print(join(dirname(abspath(__file__)), 'api', 'replstartup.py')) -elif len(argv) > 1 and argv[1] == 'linter': +def _start_linter(): """ This is a pre-alpha API. You're not supposed to use it at all, except for testing. It will very likely change. """ import jedi - import sys if '--debug' in sys.argv: jedi.set_debug_function() @@ -37,7 +32,17 @@ print(error) except Exception: if '--pdb' in sys.argv: + import traceback + traceback.print_exc() import pdb pdb.post_mortem() else: raise + + +if len(sys.argv) == 2 and sys.argv[1] == 'repl': + # don't want to use __main__ only for repl yet, maybe we want to use it for + # something else. So just use the keyword ``repl`` for now. + print(join(dirname(abspath(__file__)), 'api', 'replstartup.py')) +elif len(sys.argv) > 1 and sys.argv[1] == 'linter': + _start_linter() diff --git a/pythonFiles/release/jedi/_compatibility.py b/pythonFiles/release/jedi/_compatibility.py index 1a1e943f43c7..52a20fe2c07c 100755 --- a/pythonFiles/release/jedi/_compatibility.py +++ b/pythonFiles/release/jedi/_compatibility.py @@ -6,26 +6,72 @@ import imp import os import re +import pkgutil +import warnings try: import importlib except ImportError: pass +# Cannot use sys.version.major and minor names, because in Python 2.6 it's not +# a namedtuple. 
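+# (Index access stays portable: even on 2.6, sys.version_info is an ordinary +# tuple, e.g. sys.version_info[:2] == (2, 6), so [0] and [1] always work.)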
is_py3 = sys.version_info[0] >= 3 -is_py33 = is_py3 and sys.version_info.minor >= 3 +is_py33 = is_py3 and sys.version_info[1] >= 3 +is_py34 = is_py3 and sys.version_info[1] >= 4 +is_py35 = is_py3 and sys.version_info[1] >= 5 is_py26 = not is_py3 and sys.version_info[1] < 7 +py_version = int(str(sys.version_info[0]) + str(sys.version_info[1])) -def find_module_py33(string, path=None): - loader = importlib.machinery.PathFinder.find_module(string, path) +class DummyFile(object): + def __init__(self, loader, string): + self.loader = loader + self.string = string + + def read(self): + return self.loader.get_source(self.string) + + def close(self): + del self.loader + + +def find_module_py34(string, path=None, fullname=None): + implicit_namespace_pkg = False + spec = None + loader = None + + spec = importlib.machinery.PathFinder.find_spec(string, path) + if hasattr(spec, 'origin'): + origin = spec.origin + implicit_namespace_pkg = origin == 'namespace' + + # We try to disambiguate implicit namespace pkgs with non implicit namespace pkgs + if implicit_namespace_pkg: + fullname = string if not path else fullname + implicit_ns_info = ImplicitNSInfo(fullname, spec.submodule_search_locations._path) + return None, implicit_ns_info, False + + # we have found the tail end of the dotted path + if hasattr(spec, 'loader'): + loader = spec.loader + return find_module_py33(string, path, loader) + +def find_module_py33(string, path=None, loader=None, fullname=None): + loader = loader or importlib.machinery.PathFinder.find_module(string, path) if loader is None and path is None: # Fallback to find builtins try: - loader = importlib.find_loader(string) + with warnings.catch_warnings(record=True): + # Mute "DeprecationWarning: Use importlib.util.find_spec() + # instead." While we should replace that in the future, it's + # probably good to wait until we deprecate Python 3.3, since + # it was added in Python 3.4 and find_loader hasn't been + # removed in 3.6. + loader = importlib.find_loader(string) except ValueError as e: # See #491. Importlib might raise a ValueError, to avoid this, we # just raise an ImportError to fix the issue. 
- raise ImportError("Originally ValueError: " + e.message) + raise ImportError("Originally " + repr(e)) if loader is None: raise ImportError("Couldn't find a loader for {0}".format(string)) @@ -33,33 +79,77 @@ def find_module_py33(string, path=None): try: is_package = loader.is_package(string) if is_package: - module_path = os.path.dirname(loader.path) - module_file = None + if hasattr(loader, 'path'): + module_path = os.path.dirname(loader.path) + else: + # At least zipimporter does not have path attribute + module_path = os.path.dirname(loader.get_filename(string)) + if hasattr(loader, 'archive'): + module_file = DummyFile(loader, string) + else: + module_file = None else: module_path = loader.get_filename(string) - module_file = open(module_path, 'rb') + module_file = DummyFile(loader, string) except AttributeError: # ExtensionLoader has not attribute get_filename, instead it has a # path attribute that we can use to retrieve the module path try: module_path = loader.path - module_file = open(loader.path, 'rb') + module_file = DummyFile(loader, string) except AttributeError: module_path = string module_file = None finally: is_package = False + if hasattr(loader, 'archive'): + module_path = loader.archive + return module_file, module_path, is_package -def find_module_pre_py33(string, path=None): - module_file, module_path, description = imp.find_module(string, path) - module_type = description[2] - return module_file, module_path, module_type is imp.PKG_DIRECTORY +def find_module_pre_py33(string, path=None, fullname=None): + try: + module_file, module_path, description = imp.find_module(string, path) + module_type = description[2] + return module_file, module_path, module_type is imp.PKG_DIRECTORY + except ImportError: + pass + + if path is None: + path = sys.path + for item in path: + loader = pkgutil.get_importer(item) + if loader: + try: + loader = loader.find_module(string) + if loader: + is_package = loader.is_package(string) + is_archive = hasattr(loader, 'archive') + try: + module_path = loader.get_filename(string) + except AttributeError: + # fallback for py26 + try: + module_path = loader._get_filename(string) + except AttributeError: + continue + if is_package: + module_path = os.path.dirname(module_path) + if is_archive: + module_path = loader.archive + file = None + if not is_package or is_archive: + file = DummyFile(loader, string) + return (file, module_path, is_package) + except ImportError: + pass + raise ImportError("No module named {0}".format(string)) find_module = find_module_py33 if is_py33 else find_module_pre_py33 +find_module = find_module_py34 if is_py34 else find_module find_module.__doc__ = """ Provides information about a module. @@ -71,28 +161,18 @@ def find_module_pre_py33(string, path=None): """ +class ImplicitNSInfo(object): + """Stores information returned from an implicit namespace spec""" + def __init__(self, name, paths): + self.name = name + self.paths = paths + # unicode function try: unicode = unicode except NameError: unicode = str -if is_py3: - u = lambda s: s -else: - u = lambda s: s.decode('utf-8') - -u.__doc__ = """ -Decode a raw string into unicode object. Do nothing in Python 3. 
-""" - -# exec function -if is_py3: - def exec_function(source, global_map): - exec(source, global_map) -else: - eval(compile("""def exec_function(source, global_map): - exec source in global_map """, 'blub', 'exec')) # re-raise function if is_py3: @@ -147,7 +227,8 @@ def u(string): """ if is_py3: return str(string) - elif not isinstance(string, unicode): + + if not isinstance(string, unicode): return unicode(str(string), 'UTF-8') return string @@ -174,6 +255,11 @@ def literal_eval(string): except ImportError: from itertools import izip_longest as zip_longest # Python 2 +try: + FileNotFoundError = FileNotFoundError +except NameError: + FileNotFoundError = IOError + def no_unicode_pprint(dct): """ diff --git a/pythonFiles/release/jedi/api/__init__.py b/pythonFiles/release/jedi/api/__init__.py index 5c710623f86d..871dc84b78e3 100755 --- a/pythonFiles/release/jedi/api/__init__.py +++ b/pythonFiles/release/jedi/api/__init__.py @@ -3,51 +3,43 @@ use its methods. Additionally you can add a debug function with :func:`set_debug_function`. +Alternatively, if you don't need a custom function and are happy with printing +debug messages to stdout, simply call :func:`set_debug_function` without +arguments. .. warning:: Please, note that Jedi is **not thread safe**. """ -import re import os -import warnings import sys -from itertools import chain -from jedi._compatibility import unicode, builtins -from jedi.parser import Parser, load_grammar -from jedi.parser.tokenize import source_tokens -from jedi.parser import tree -from jedi.parser.user_context import UserContext, UserContextParser +import parso +from parso.python import tree +from parso import python_bytes_to_unicode, split_lines + +from jedi.parser_utils import get_executable_nodes, get_statement_of_position from jedi import debug from jedi import settings -from jedi import common from jedi import cache -from jedi.api import keywords from jedi.api import classes from jedi.api import interpreter -from jedi.api import usages from jedi.api import helpers +from jedi.api.completion import Completion from jedi.evaluate import Evaluator -from jedi.evaluate import representation as er -from jedi.evaluate import compiled from jedi.evaluate import imports -from jedi.evaluate.cache import memoize_default -from jedi.evaluate.helpers import FakeName, get_module_names -from jedi.evaluate.finder import global_names_dict_generator, filter_definition_names -from jedi.evaluate import analysis +from jedi.evaluate import usages +from jedi.evaluate.project import Project +from jedi.evaluate.arguments import try_iter_content +from jedi.evaluate.helpers import get_module_names, evaluate_call_of_leaf +from jedi.evaluate.sys_path import dotted_path_in_sys_path +from jedi.evaluate.filters import TreeNameDefinition +from jedi.evaluate.syntax_tree import tree_name_to_contexts +from jedi.evaluate.context import ModuleContext +from jedi.evaluate.context.module import ModuleName +from jedi.evaluate.context.iterable import unpack_tuple_to_dict # Jedi uses lots and lots of recursion. By setting this a little bit higher, we # can remove some "maximum recursion depth" errors. -sys.setrecursionlimit(2000) - - -class NotFoundError(Exception): - """A custom error to avoid catching the wrong exceptions. - - .. deprecated:: 0.9.0 - Not in use anymore, Jedi just returns no goto result if you're not on a - valid name. - .. todo:: Remove! 
- """ +sys.setrecursionlimit(3000) class Script(object): @@ -58,12 +50,24 @@ class Script(object): You can either use the ``source`` parameter or ``path`` to read a file. Usually you're going to want to use both of them (in an editor). + The script might be analyzed in a different ``sys.path`` than |jedi|: + + - if `sys_path` parameter is not ``None``, it will be used as ``sys.path`` + for the script; + + - if `sys_path` parameter is ``None`` and ``VIRTUAL_ENV`` environment + variable is defined, ``sys.path`` for the specified environment will be + guessed (see :func:`jedi.evaluate.sys_path.get_venv_path`) and used for + the script; + + - otherwise ``sys.path`` will match that of |jedi|. + :param source: The source code of the current file, separated by newlines. :type source: str :param line: The line to perform actions on (starting with 1). :type line: int - :param col: The column of the cursor (starting with 0). - :type col: int + :param column: The column of the cursor (starting with 0). + :type column: int :param path: The path of the file in the file system, or ``''`` if it hasn't been saved yet. :type path: str or None @@ -73,62 +77,67 @@ class Script(object): :param source_encoding: The encoding of ``source``, if it is not a ``unicode`` object (default ``'utf-8'``). :type encoding: str + :param sys_path: ``sys.path`` to use during analysis of the script + :type sys_path: list + """ def __init__(self, source=None, line=None, column=None, path=None, - encoding='utf-8', source_path=None, source_encoding=None): - if source_path is not None: - warnings.warn("Use path instead of source_path.", DeprecationWarning) - path = source_path - if source_encoding is not None: - warnings.warn("Use encoding instead of source_encoding.", DeprecationWarning) - encoding = source_encoding - + encoding='utf-8', sys_path=None): self._orig_path = path - self.path = None if path is None else os.path.abspath(path) + # An empty path (also empty string) should always result in no path. + self.path = os.path.abspath(path) if path else None if source is None: - try: - with open(path) as f: - source = f.read() - except UnicodeDecodeError: - with open(path, encoding=encoding) as f: - source = f.read() - - self.source = common.source_to_unicode(source, encoding) - lines = common.splitlines(self.source) - line = max(len(lines), 1) if line is None else line - if not (0 < line <= len(lines)): + # TODO add a better warning than the traceback! + with open(path, 'rb') as f: + source = f.read() + + # TODO do we really want that? 
+ self._source = python_bytes_to_unicode(source, encoding, errors='replace') + self._code_lines = split_lines(self._source) + line = max(len(self._code_lines), 1) if line is None else line + if not (0 < line <= len(self._code_lines)): raise ValueError('`line` parameter is not in a valid range.') - line_len = len(lines[line - 1]) + line_len = len(self._code_lines[line - 1]) column = line_len if column is None else column if not (0 <= column <= line_len): raise ValueError('`column` parameter is not in a valid range.') self._pos = line, column + self._path = path cache.clear_time_caches() debug.reset_time() - self._grammar = load_grammar('grammar%s.%s' % sys.version_info[:2]) - self._user_context = UserContext(self.source, self._pos) - self._parser = UserContextParser(self._grammar, self.source, path, - self._pos, self._user_context, - self._parsed_callback) - self._evaluator = Evaluator(self._grammar) - debug.speed('init') - def _parsed_callback(self, parser): - module = self._evaluator.wrap(parser.module) - imports.add_module(self._evaluator, unicode(module.name), module) + # Load the Python grammar of the current interpreter. + self._grammar = parso.load_grammar() + project = Project(sys_path=sys_path) + self._evaluator = Evaluator(self._grammar, project) + project.add_script_path(self.path) + debug.speed('init') - @property - def source_path(self): - """ - .. deprecated:: 0.7.0 - Use :attr:`.path` instead. - .. todo:: Remove! - """ - warnings.warn("Use path instead of source_path.", DeprecationWarning) - return self.path + @cache.memoize_method + def _get_module_node(self): + return self._grammar.parse( + code=self._source, + path=self.path, + cache=False, # No disk cache, because the current script often changes. + diff_cache=True, + cache_path=settings.cache_directory + ) + + @cache.memoize_method + def _get_module(self): + module = ModuleContext( + self._evaluator, + self._get_module_node(), + self.path + ) + if self.path is not None: + name = dotted_path_in_sys_path(self._evaluator.project.sys_path, self.path) + if name is not None: + imports.add_module(self._evaluator, name, module) + return module def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, repr(self._orig_path)) @@ -141,187 +150,14 @@ def completions(self): :return: Completion objects, sorted by name and __ comes last. :rtype: list of :class:`classes.Completion` """ - def get_completions(user_stmt, bs): - # TODO this closure is ugly. it also doesn't work with - # simple_complete (used for Interpreter), somehow redo. - module = self._evaluator.wrap(self._parser.module()) - names, level, only_modules, unfinished_dotted = \ - helpers.check_error_statements(module, self._pos) - completion_names = [] - if names is not None: - imp_names = tuple(str(n) for n in names if n.end_pos < self._pos) - i = imports.Importer(self._evaluator, imp_names, module, level) - completion_names = i.completion_names(self._evaluator, only_modules) - - # TODO this paragraph is necessary, but not sure it works. - context = self._user_context.get_context() - if not next(context).startswith('.'): # skip the path - if next(context) == 'from': - # completion is just "import" if before stands from .. 
- if unfinished_dotted: - return completion_names - else: - return keywords.keyword_names('import') - - if isinstance(user_stmt, tree.Import): - module = self._parser.module() - completion_names += imports.completion_names(self._evaluator, - user_stmt, self._pos) - return completion_names - - if names is None and not isinstance(user_stmt, tree.Import): - if not path and not dot: - # add keywords - completion_names += keywords.keyword_names(all=True) - # TODO delete? We should search for valid parser - # transformations. - completion_names += self._simple_complete(path, dot, like) - return completion_names - debug.speed('completions start') - path = self._user_context.get_path_until_cursor() - # Dots following an int are not the start of a completion but a float - # literal. - if re.search(r'^\d\.$', path): - return [] - path, dot, like = helpers.completion_parts(path) - - user_stmt = self._parser.user_stmt_with_whitespace() - - b = compiled.builtin - completion_names = get_completions(user_stmt, b) - - if not dot: - # add named params - for call_sig in self.call_signatures(): - # Allow protected access, because it's a public API. - module = call_sig._name.get_parent_until() - # Compiled modules typically don't allow keyword arguments. - if not isinstance(module, compiled.CompiledObject): - for p in call_sig.params: - # Allow access on _definition here, because it's a - # public API and we don't want to make the internal - # Name object public. - if p._definition.stars == 0: # no *args/**kwargs - completion_names.append(p._name) - - needs_dot = not dot and path - - comps = [] - comp_dct = {} - for c in set(completion_names): - n = str(c) - if settings.case_insensitive_completion \ - and n.lower().startswith(like.lower()) \ - or n.startswith(like): - if isinstance(c.parent, (tree.Function, tree.Class)): - # TODO I think this is a hack. It should be an - # er.Function/er.Class before that. - c = self._evaluator.wrap(c.parent).name - new = classes.Completion(self._evaluator, c, needs_dot, len(like)) - k = (new.name, new.complete) # key - if k in comp_dct and settings.no_completion_duplicates: - comp_dct[k]._same_name_completions.append(new) - else: - comp_dct[k] = new - comps.append(new) - + completion = Completion( + self._evaluator, self._get_module(), self._code_lines, + self._pos, self.call_signatures + ) + completions = completion.completions() debug.speed('completions end') - - return sorted(comps, key=lambda x: (x.name.startswith('__'), - x.name.startswith('_'), - x.name.lower())) - - def _simple_complete(self, path, dot, like): - if not path and not dot: - scope = self._parser.user_scope() - if not scope.is_scope(): # Might be a flow (if/while/etc). 
- scope = scope.get_parent_scope() - names_dicts = global_names_dict_generator( - self._evaluator, - self._evaluator.wrap(scope), - self._pos - ) - completion_names = [] - for names_dict, pos in names_dicts: - names = list(chain.from_iterable(names_dict.values())) - if not names: - continue - completion_names += filter_definition_names(names, self._parser.user_stmt(), pos) - elif self._get_under_cursor_stmt(path) is None: - return [] - else: - scopes = list(self._prepare_goto(path, True)) - completion_names = [] - debug.dbg('possible completion scopes: %s', scopes) - for s in scopes: - names = [] - for names_dict in s.names_dicts(search_global=False): - names += chain.from_iterable(names_dict.values()) - - completion_names += filter_definition_names(names, self._parser.user_stmt()) - return completion_names - - def _prepare_goto(self, goto_path, is_completion=False): - """ - Base for completions/goto. Basically it returns the resolved scopes - under cursor. - """ - debug.dbg('start: %s in %s', goto_path, self._parser.user_scope()) - - user_stmt = self._parser.user_stmt_with_whitespace() - if not user_stmt and len(goto_path.split('\n')) > 1: - # If the user_stmt is not defined and the goto_path is multi line, - # something's strange. Most probably the backwards tokenizer - # matched to much. - return [] - - if isinstance(user_stmt, tree.Import): - i, _ = helpers.get_on_import_stmt(self._evaluator, self._user_context, - user_stmt, is_completion) - if i is None: - return [] - scopes = [i] - else: - # just parse one statement, take it and evaluate it - eval_stmt = self._get_under_cursor_stmt(goto_path) - if eval_stmt is None: - return [] - - module = self._evaluator.wrap(self._parser.module()) - names, level, _, _ = helpers.check_error_statements(module, self._pos) - if names: - names = [str(n) for n in names] - i = imports.Importer(self._evaluator, names, module, level) - return i.follow() - - scopes = self._evaluator.eval_element(eval_stmt) - - return scopes - - @memoize_default() - def _get_under_cursor_stmt(self, cursor_txt, start_pos=None): - tokenizer = source_tokens(cursor_txt) - r = Parser(self._grammar, cursor_txt, tokenizer=tokenizer) - try: - # Take the last statement available that is not an endmarker. - # And because it's a simple_stmt, we need to get the first child. - stmt = r.module.children[-2].children[0] - except (AttributeError, IndexError): - return None - - user_stmt = self._parser.user_stmt() - if user_stmt is None: - # Set the start_pos to a pseudo position, that doesn't exist but - # works perfectly well (for both completions in docstrings and - # statements). - pos = start_pos or self._pos - else: - pos = user_stmt.start_pos - - stmt.move(pos[0] - 1, pos[1]) # Moving the offset. - stmt.parent = self._parser.user_scope() - return stmt + return completions def goto_definitions(self): """ @@ -335,120 +171,59 @@ def goto_definitions(self): :rtype: list of :class:`classes.Definition` """ - def resolve_import_paths(scopes): - for s in scopes.copy(): - if isinstance(s, imports.ImportWrapper): - scopes.remove(s) - scopes.update(resolve_import_paths(set(s.follow()))) - return scopes - - goto_path = self._user_context.get_path_under_cursor() - context = self._user_context.get_context() - definitions = set() - if next(context) in ('class', 'def'): - definitions = set([self._evaluator.wrap(self._parser.user_scope())]) - else: - # Fetch definition of callee, if there's no path otherwise. 
- if not goto_path: - definitions = set(signature._definition - for signature in self.call_signatures()) - - if re.match('\w[\w\d_]*$', goto_path) and not definitions: - user_stmt = self._parser.user_stmt() - if user_stmt is not None and user_stmt.type == 'expr_stmt': - for name in user_stmt.get_defined_names(): - if name.start_pos <= self._pos <= name.end_pos: - # TODO scaning for a name and then using it should be - # the default. - definitions = set(self._evaluator.goto_definition(name)) - - if not definitions and goto_path: - definitions = set(self._prepare_goto(goto_path)) - - definitions = resolve_import_paths(definitions) + module_node = self._get_module_node() + leaf = module_node.get_name_of_position(self._pos) + if leaf is None: + leaf = module_node.get_leaf_for_position(self._pos) + if leaf is None: + return [] + + context = self._evaluator.create_context(self._get_module(), leaf) + definitions = helpers.evaluate_goto_definition(self._evaluator, context, leaf) + names = [s.name for s in definitions] defs = [classes.Definition(self._evaluator, name) for name in names] + # The additional set here allows the definitions to become unique in an + # API sense. In the internals we want to separate more things than in + # the API. return helpers.sorted_definitions(set(defs)) - def goto_assignments(self): + def goto_assignments(self, follow_imports=False): """ - Return the first definition found. Imports and statements aren't - followed. Multiple objects may be returned, because Python itself is a + Return the first definition found, while optionally following imports. + Multiple objects may be returned, because Python itself is a dynamic language, which means depending on an option you can have two different versions of a function. :rtype: list of :class:`classes.Definition` """ - results = self._goto() - d = [classes.Definition(self._evaluator, d) for d in set(results)] - return helpers.sorted_definitions(d) - - def _goto(self, add_import_name=False): - """ - Used for goto_assignments and usages. + def filter_follow_imports(names, check): + for name in names: + if check(name): + for result in filter_follow_imports(name.goto(), check): + yield result + else: + yield name - :param add_import_name: Add the the name (if import) to the result. - """ - def follow_inexistent_imports(defs): - """ Imports can be generated, e.g. following - `multiprocessing.dummy` generates an import dummy in the - multiprocessing module. The Import doesn't exist -> follow. - """ - definitions = set(defs) - for d in defs: - if isinstance(d.parent, tree.Import) \ - and d.start_pos == (0, 0): - i = imports.ImportWrapper(self._evaluator, d.parent).follow(is_goto=True) - definitions.remove(d) - definitions |= follow_inexistent_imports(i) - return definitions - - goto_path = self._user_context.get_path_under_cursor() - context = self._user_context.get_context() - user_stmt = self._parser.user_stmt() - user_scope = self._parser.user_scope() - - stmt = self._get_under_cursor_stmt(goto_path) - if stmt is None: + tree_name = self._get_module_node().get_name_of_position(self._pos) + if tree_name is None: return [] - - if user_scope is None: - last_name = None - else: - # Try to use the parser if possible. - last_name = user_scope.name_for_position(self._pos) - - if last_name is None: - last_name = stmt - while not isinstance(last_name, tree.Name): - try: - last_name = last_name.children[-1] - except AttributeError: - # Doesn't have a name in it. 
- return [] - - if next(context) in ('class', 'def'): - # The cursor is on a class/function name. - user_scope = self._parser.user_scope() - definitions = set([user_scope.name]) - elif isinstance(user_stmt, tree.Import): - s, name = helpers.get_on_import_stmt(self._evaluator, - self._user_context, user_stmt) - - definitions = self._evaluator.goto(name) + context = self._evaluator.create_context(self._get_module(), tree_name) + names = list(self._evaluator.goto(context, tree_name)) + + if follow_imports: + def check(name): + if isinstance(name, ModuleName): + return False + return name.api_type == 'module' else: - # The Evaluator.goto function checks for definitions, but since we - # use a reverse tokenizer, we have new name_part objects, so we - # have to check the user_stmt here for positions. - if isinstance(user_stmt, tree.ExprStmt) \ - and isinstance(last_name.parent, tree.ExprStmt): - for name in user_stmt.get_defined_names(): - if name.start_pos <= self._pos <= name.end_pos: - return [name] - - defs = self._evaluator.goto(last_name) - definitions = follow_inexistent_imports(defs) - return definitions + def check(name): + return isinstance(name, imports.SubModuleName) + + names = filter_follow_imports(names, check) + + defs = [classes.Definition(self._evaluator, d) for d in set(names)] + return helpers.sorted_definitions(defs) def usages(self, additional_module_paths=()): """ @@ -461,40 +236,15 @@ def usages(self, additional_module_paths=()): :rtype: list of :class:`classes.Definition` """ - temp, settings.dynamic_flow_information = \ - settings.dynamic_flow_information, False - try: - user_stmt = self._parser.user_stmt() - definitions = self._goto(add_import_name=True) - if not definitions and isinstance(user_stmt, tree.Import): - # For not defined imports (goto doesn't find something, we take - # the name as a definition. This is enough, because every name - # points to it. - name = user_stmt.name_for_position(self._pos) - if name is None: - # Must be syntax - return [] - definitions = [name] - - if not definitions: - # Without a definition for a name we cannot find references. - return [] - - if not isinstance(user_stmt, tree.Import): - # import case is looked at with add_import_name option - definitions = usages.usages_add_import_modules(self._evaluator, - definitions) - - module = set([d.get_parent_until() for d in definitions]) - module.add(self._parser.module()) - names = usages.usages(self._evaluator, definitions, module) + tree_name = self._get_module_node().get_name_of_position(self._pos) + if tree_name is None: + # Must be syntax + return [] - for d in set(definitions): - names.append(classes.Definition(self._evaluator, d)) - finally: - settings.dynamic_flow_information = temp + names = usages.usages(self._get_module(), tree_name) - return helpers.sorted_definitions(set(names)) + definitions = [classes.Definition(self._evaluator, n) for n in names] + return helpers.sorted_definitions(definitions) def call_signatures(self): """ @@ -508,50 +258,67 @@ def call_signatures(self): abs()# <-- cursor is here - This would return ``None``. + This would return an empty list.. 
:rtype: list of :class:`classes.CallSignature` """ - call_txt, call_index, key_name, start_pos = self._user_context.call_signature() - if call_txt is None: + call_signature_details = \ + helpers.get_call_signature_details(self._get_module_node(), self._pos) + if call_signature_details is None: return [] - stmt = self._get_under_cursor_stmt(call_txt, start_pos) - if stmt is None: - return [] - - with common.scale_speed_settings(settings.scale_call_signatures): - origins = cache.cache_call_signatures(self._evaluator, stmt, - self.source, self._pos) + context = self._evaluator.create_context( + self._get_module(), + call_signature_details.bracket_leaf + ) + definitions = helpers.cache_call_signatures( + self._evaluator, + context, + call_signature_details.bracket_leaf, + self._code_lines, + self._pos + ) debug.speed('func_call followed') - return [classes.CallSignature(self._evaluator, o.name, stmt, call_index, key_name) - for o in origins if hasattr(o, 'py__call__')] + return [classes.CallSignature(self._evaluator, d.name, + call_signature_details.bracket_leaf.start_pos, + call_signature_details.call_index, + call_signature_details.keyword_name_str) + for d in definitions if hasattr(d, 'py__call__')] def _analysis(self): - def check_types(types): - for typ in types: - try: - f = typ.iter_content - except AttributeError: - pass + self._evaluator.is_analysis = True + module_node = self._get_module_node() + self._evaluator.analysis_modules = [module_node] + try: + for node in get_executable_nodes(module_node): + context = self._get_module().create_context(node) + if node.type in ('funcdef', 'classdef'): + # Resolve the decorators. + tree_name_to_contexts(self._evaluator, context, node.children[1]) + elif isinstance(node, tree.Import): + import_names = set(node.get_defined_names()) + if node.is_nested(): + import_names |= set(path[-1] for path in node.get_paths()) + for n in import_names: + imports.infer_import(context, n) + elif node.type == 'expr_stmt': + types = context.eval_node(node) + for testlist in node.children[:-1:2]: + # Iterate tuples. + unpack_tuple_to_dict(context, types, testlist) else: - check_types(f()) - - #statements = set(chain(*self._parser.module().used_names.values())) - nodes, imp_names, decorated_funcs = \ - analysis.get_module_statements(self._parser.module()) - # Sort the statements so that the results are reproducible. 
- for n in imp_names: - imports.ImportWrapper(self._evaluator, n).follow() - for node in sorted(nodes, key=lambda obj: obj.start_pos): - check_types(self._evaluator.eval_element(node)) - - for dec_func in decorated_funcs: - er.Function(self._evaluator, dec_func).get_decorated_func() + if node.type == 'name': + defs = self._evaluator.goto_definitions(context, node) + else: + defs = evaluate_call_of_leaf(context, node) + try_iter_content(defs) + self._evaluator.reset_recursion_limitations() - ana = [a for a in self._evaluator.analysis if self.path == a.path] - return sorted(set(ana), key=lambda x: x.line) + ana = [a for a in self._evaluator.analysis if self.path == a.path] + return sorted(set(ana), key=lambda x: x.line) + finally: + self._evaluator.is_analysis = False class Interpreter(Script): @@ -565,7 +332,7 @@ class Interpreter(Script): >>> from os.path import join >>> namespace = locals() - >>> script = Interpreter('join().up', [namespace]) + >>> script = Interpreter('join("").up', [namespace]) >>> print(script.completions()[0].name) upper """ @@ -584,81 +351,22 @@ def __init__(self, source, namespaces, **kwds): If `line` and `column` are None, they are assumed be at the end of `source`. """ - if type(namespaces) is not list or len(namespaces) == 0 or \ - any([type(x) is not dict for x in namespaces]): - raise TypeError("namespaces must be a non-empty list of dict") + try: + namespaces = [dict(n) for n in namespaces] + except Exception: + raise TypeError("namespaces must be a non-empty list of dicts.") super(Interpreter, self).__init__(source, **kwds) self.namespaces = namespaces - # Don't use the fast parser, because it does crazy stuff that we don't - # need in our very simple and small code here (that is always - # changing). - self._parser = UserContextParser(self._grammar, self.source, - self._orig_path, self._pos, - self._user_context, self._parsed_callback, - use_fast_parser=False) - interpreter.add_namespaces_to_parser(self._evaluator, namespaces, - self._parser.module()) - - def _simple_complete(self, path, dot, like): - user_stmt = self._parser.user_stmt_with_whitespace() - is_simple_path = not path or re.search('^[\w][\w\d.]*$', path) - if isinstance(user_stmt, tree.Import) or not is_simple_path: - return super(Interpreter, self)._simple_complete(path, dot, like) - else: - class NamespaceModule(object): - def __getattr__(_, name): - for n in self.namespaces: - try: - return n[name] - except KeyError: - pass - raise AttributeError() - - def __dir__(_): - gen = (n.keys() for n in self.namespaces) - return list(set(chain.from_iterable(gen))) - - paths = path.split('.') if path else [] - - namespaces = (NamespaceModule(), builtins) - for p in paths: - old, namespaces = namespaces, [] - for n in old: - try: - namespaces.append(getattr(n, p)) - except Exception: - pass - - completion_names = [] - for namespace in namespaces: - for name in dir(namespace): - if name.lower().startswith(like.lower()): - scope = self._parser.module() - n = FakeName(name, scope) - completion_names.append(n) - return completion_names - - -def defined_names(source, path=None, encoding='utf-8'): - """ - Get all definitions in `source` sorted by its position. - - This functions can be used for listing functions, classes and - data defined in a file. This can be useful if you want to list - them in "sidebar". Each element in the returned list also has - `defined_names` method which can be used to get sub-definitions - (e.g., methods in class). - - :rtype: list of classes.Definition - - .. 
deprecated:: 0.9.0 - Use :func:`names` instead. - .. todo:: Remove! - """ - warnings.warn("Use call_signatures instead.", DeprecationWarning) - return names(source, path, encoding) + def _get_module(self): + parser_module = super(Interpreter, self)._get_module_node() + return interpreter.MixedModuleContext( + self._evaluator, + parser_module, + self.namespaces, + path=self.path + ) def names(source=None, path=None, encoding='utf-8', all_scopes=False, @@ -678,13 +386,21 @@ def names(source=None, path=None, encoding='utf-8', all_scopes=False, ``definitions=True``. E.g. ``a = b`` returns ``b``. """ def def_ref_filter(_def): - is_def = _def.is_definition() + is_def = _def._name.tree_name.is_definition() return definitions and is_def or references and not is_def # Set line/column to a random position, because they don't matter. script = Script(source, line=1, column=0, path=path, encoding=encoding) - defs = [classes.Definition(script._evaluator, name_part) - for name_part in get_module_names(script._parser.module(), all_scopes)] + module_context = script._get_module() + defs = [ + classes.Definition( + script._evaluator, + TreeNameDefinition( + module_context.create_context(name if name.parent.type == 'file_input' else name.parent), + name + ) + ) for name in get_module_names(script._get_module_node(), all_scopes) + ] return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column)) @@ -705,6 +421,8 @@ def set_debug_function(func_cb=debug.print_to_stdout, warnings=True, """ Define a callback debug function to get all the debug messages. + If you don't specify any arguments, debug messages will be printed to stdout. + :param func_cb: The callback function for debug messages, with n params. """ debug.debug_function = func_cb diff --git a/pythonFiles/release/jedi/api/classes.py b/pythonFiles/release/jedi/api/classes.py index a1d42bd0ffe2..1937cc6065a4 100755 --- a/pythonFiles/release/jedi/api/classes.py +++ b/pythonFiles/release/jedi/api/classes.py @@ -3,34 +3,38 @@ These classes are the much bigger part of the whole API, because they contain the interesting information about completion and goto operations. """ -import warnings -from itertools import chain import re -from jedi._compatibility import unicode, use_metaclass +from parso.cache import parser_cache +from parso.python.tree import search_ancestor + +from jedi._compatibility import u from jedi import settings -from jedi import common -from jedi.parser import tree -from jedi.evaluate.cache import memoize_default, CachedMetaClass -from jedi.evaluate import representation as er -from jedi.evaluate import iterable +from jedi.evaluate.utils import ignored, unite +from jedi.cache import memoize_method from jedi.evaluate import imports from jedi.evaluate import compiled -from jedi.api import keywords -from jedi.evaluate.finder import filter_definition_names +from jedi.evaluate.filters import ParamName +from jedi.evaluate.imports import ImportName +from jedi.evaluate.context import instance +from jedi.evaluate.context import ClassContext, FunctionContext, FunctionExecutionContext +from jedi.api.keywords import KeywordName + +def _sort_names_by_start_pos(names): + return sorted(names, key=lambda s: s.start_pos or (0, 0)) -def defined_names(evaluator, scope): + +def defined_names(evaluator, context): """ List sub-definitions (e.g., methods in class). 
:type scope: Scope :rtype: list of Definition """ - dct = scope.names_dict - names = list(chain.from_iterable(dct.values())) - names = filter_definition_names(names, scope) - return [Definition(evaluator, d) for d in sorted(names, key=lambda s: s.start_pos)] + filter = next(context.get_filters(search_global=True)) + names = [name for name in filter.values()] + return [Definition(evaluator, n) for n in _sort_names_by_start_pos(names)] class BaseDefinition(object): @@ -51,25 +55,22 @@ class BaseDefinition(object): _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in { 'argparse._ActionsContainer': 'argparse.ArgumentParser', - '_sre.SRE_Match': 're.MatchObject', - '_sre.SRE_Pattern': 're.RegexObject', }.items()) def __init__(self, evaluator, name): self._evaluator = evaluator self._name = name """ - An instance of :class:`jedi.parser.reprsentation.Name` subclass. + An instance of :class:`parso.reprsentation.Name` subclass. """ - self._definition = evaluator.wrap(self._name.get_definition()) - self.is_keyword = isinstance(self._definition, keywords.Keyword) + self.is_keyword = isinstance(self._name, KeywordName) # generate a path to the definition - self._module = name.get_parent_until() + self._module = name.get_root_context() if self.in_builtin_module(): self.module_path = None else: - self.module_path = self._module.path + self.module_path = self._module.py__file__() """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``""" @property @@ -81,17 +82,7 @@ def name(self): :rtype: str or None """ - return unicode(self._name) - - @property - def start_pos(self): - """ - .. deprecated:: 0.7.0 - Use :attr:`.line` and :attr:`.column` instead. - .. todo:: Remove! - """ - warnings.warn("Use line/column instead.", DeprecationWarning) - return self._name.start_pos + return self._name.string_name @property def type(self): @@ -130,7 +121,7 @@ def type(self): >>> defs = sorted(defs, key=lambda d: d.line) >>> defs # doctest: +NORMALIZE_WHITESPACE [, , - , ] + , ] Finally, here is what you can get from :attr:`type`: @@ -144,45 +135,59 @@ def type(self): 'function' """ - stripped = self._definition - if isinstance(stripped, er.InstanceElement): - stripped = stripped.var + tree_name = self._name.tree_name + resolve = False + if tree_name is not None: + # TODO move this to their respective names. 
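+ # (A name that is itself the definition in a `from x import y` statement is + # resolved below, so `type` reports the imported target's kind rather than + # just 'import'.)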
+ definition = tree_name.get_definition() + if definition is not None and definition.type == 'import_from' and \ + tree_name.is_definition(): + resolve = True - if isinstance(stripped, compiled.CompiledObject): - return stripped.api_type() - elif isinstance(stripped, iterable.Array): - return 'instance' - elif isinstance(stripped, tree.Import): - return 'import' - - string = type(stripped).__name__.lower().replace('wrapper', '') - if string == 'exprstmt': - return 'statement' - else: - return string + if isinstance(self._name, imports.SubModuleName) or resolve: + for context in self._name.infer(): + return context.api_type + return self._name.api_type def _path(self): """The path to a module/class/function definition.""" - path = [] - par = self._definition - while par is not None: - if isinstance(par, tree.Import): - path += imports.ImportWrapper(self._evaluator, self._name).import_path - break - try: - name = par.name - except AttributeError: - pass + def to_reverse(): + name = self._name + if name.api_type == 'module': + try: + name = list(name.infer())[0].name + except IndexError: + pass + + if name.api_type == 'module': + module_contexts = name.infer() + if module_contexts: + module_context, = module_contexts + for n in reversed(module_context.py__name__().split('.')): + yield n + else: + # We don't really know anything about the path here. This + # module is just an import that would lead in an + # ImportError. So simply return the name. + yield name.string_name + return else: - if isinstance(par, er.ModuleWrapper): - # TODO just make the path dotted from the beginning, we - # shouldn't really split here. - path[0:0] = par.py__name__().split('.') - break + yield name.string_name + + parent_context = name.parent_context + while parent_context is not None: + try: + method = parent_context.py__name__ + except AttributeError: + try: + yield parent_context.name.string_name + except AttributeError: + pass else: - path.insert(0, unicode(name)) - par = par.parent - return path + for name in reversed(method().split('.')): + yield name + parent_context = parent_context.parent_context + return reversed(list(to_reverse())) @property def module_name(self): @@ -196,7 +201,7 @@ def module_name(self): >>> print(d.module_name) # doctest: +ELLIPSIS json """ - return str(self._module.name) + return self._module.name.string_name def in_builtin_module(self): """Whether this is a builtin module.""" @@ -205,18 +210,20 @@ def in_builtin_module(self): @property def line(self): """The line where the definition occurs (starting with 1).""" - if self.in_builtin_module(): + start_pos = self._name.start_pos + if start_pos is None: return None - return self._name.start_pos[0] + return start_pos[0] @property def column(self): """The column where the definition occurs (starting with 0).""" - if self.in_builtin_module(): + start_pos = self._name.start_pos + if start_pos is None: return None - return self._name.start_pos[1] + return start_pos[1] - def docstring(self, raw=False): + def docstring(self, raw=False, fast=True): r""" Return a document string for this completion object. @@ -241,36 +248,18 @@ def docstring(self, raw=False): >>> print(script.goto_definitions()[0].docstring(raw=True)) Document for function f. + :param fast: Don't follow imports that are only one level deep like + ``import foo``, but follow ``from foo import bar``. This makes + sense for speed reasons. 
Completing `import a` is slow if you use + the ``foo.docstring(fast=False)`` on every object, because it + parses all libraries starting with ``a``. """ - if raw: - return _Help(self._definition).raw() - else: - return _Help(self._definition).full() - - @property - def doc(self): - """ - .. deprecated:: 0.8.0 - Use :meth:`.docstring` instead. - .. todo:: Remove! - """ - warnings.warn("Use docstring() instead.", DeprecationWarning) - return self.docstring() - - @property - def raw_doc(self): - """ - .. deprecated:: 0.8.0 - Use :meth:`.docstring` instead. - .. todo:: Remove! - """ - warnings.warn("Use docstring() instead.", DeprecationWarning) - return self.docstring(raw=True) + return _Help(self._name).docstring(fast=fast, raw=raw) @property def description(self): """A textual description of the object.""" - return unicode(self._name) + return u(self._name.string_name) @property def full_name(self): @@ -291,16 +280,17 @@ def full_name(self): >>> print(script.goto_definitions()[0].full_name) os.path.join - Notice that it correctly returns ``'os.path.join'`` instead of - (for example) ``'posixpath.join'``. - + Notice that it returns ``'os.path.join'`` instead of (for example) + ``'posixpath.join'``. This is not correct, since the module's name would + be ``<module 'posixpath' ...>``. However most users find the latter + more practical. """ - path = [unicode(p) for p in self._path()] + path = list(self._path()) # TODO add further checks, the mapping should only occur on stdlib. if not path: return None # for keywords the path is empty - with common.ignored(KeyError): + with ignored(KeyError): path[0] = self._mapping[path[0]] for key, repl in self._tuple_mapping.items(): if tuple(path[:len(key)]) == key: @@ -309,89 +299,122 @@ def full_name(self): return '.'.join(path if path[0] else path[1:]) def goto_assignments(self): - defs = self._evaluator.goto(self._name) - return [Definition(self._evaluator, d) for d in defs] + if self._name.tree_name is None: + return self - @memoize_default() - def _follow_statements_imports(self): - """ - Follow both statements and imports, as far as possible. - """ - if self._definition.isinstance(tree.ExprStmt): - return self._evaluator.eval_statement(self._definition) - elif self._definition.isinstance(tree.Import): - return imports.ImportWrapper(self._evaluator, self._name).follow() - else: - return [self._definition] + names = self._evaluator.goto(self._name.parent_context, self._name.tree_name) + return [Definition(self._evaluator, n) for n in names] + + def _goto_definitions(self): + # TODO make this function public. + return [Definition(self._evaluator, d.name) for d in self._name.infer()] @property - @memoize_default() + @memoize_method def params(self): """ Raises an ``AttributeError`` if the definition is not callable. Otherwise returns a list of `Definition` that represents the params. """ + def get_param_names(context): + param_names = [] + if context.api_type == 'function': + param_names = list(context.get_param_names()) + if isinstance(context, instance.BoundMethod): + param_names = param_names[1:] + elif isinstance(context, (instance.AbstractInstanceContext, ClassContext)): + if isinstance(context, ClassContext): + search = '__init__' + else: + search = '__call__' + names = context.get_function_slot_names(search) + if not names: + return [] + + # Just take the first one here, not optimal, but currently + # there's no better solution.
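+ # (The param_names[1:] slices above and below drop the implicit self/cls + # parameter from the reported signature.)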
+ inferred = names[0].infer() + param_names = get_param_names(next(iter(inferred))) + if isinstance(context, ClassContext): + param_names = param_names[1:] + return param_names + elif isinstance(context, compiled.CompiledObject): + return list(context.get_param_names()) + return param_names + + followed = list(self._name.infer()) if not followed or not hasattr(followed[0], 'py__call__'): raise AttributeError() - followed = followed[0] # only check the first one. + context = followed[0] # only check the first one. - if followed.type == 'funcdef': - if isinstance(followed, er.InstanceElement): - params = followed.params[1:] - else: - params = followed.params - elif followed.isinstance(er.compiled.CompiledObject): - params = followed.params - else: - try: - sub = followed.get_subscope_by_name('__init__') - params = sub.params[1:] # ignore self - except KeyError: - return [] - return [_Param(self._evaluator, p.name) for p in params] + return [Definition(self._evaluator, n) for n in get_param_names(context)] def parent(self): - scope = self._definition.get_parent_scope() - scope = self._evaluator.wrap(scope) - return Definition(self._evaluator, scope.name) + context = self._name.parent_context + if context is None: + return None + + if isinstance(context, FunctionExecutionContext): + # TODO the function context should be a part of the function + # execution context. + context = FunctionContext( + self._evaluator, context.parent_context, context.tree_node) + return Definition(self._evaluator, context.name) def __repr__(self): return "<%s %s>" % (type(self).__name__, self.description) + def get_line_code(self, before=0, after=0): + """ + Returns the line of code where this object was defined. + + :param before: Add n lines before the current line to the output. + :param after: Add n lines after the current line to the output. + + :return str: Returns the line(s) of code or an empty string if it's a + builtin. + """ + if self.in_builtin_module(): + return '' + + path = self._name.get_root_context().py__file__() + lines = parser_cache[self._evaluator.grammar._hashed][path].lines + + index = self._name.start_pos[0] - 1 + start_index = max(index - before, 0) + return ''.join(lines[start_index:index + after + 1]) + class Completion(BaseDefinition): """ `Completion` objects are returned from :meth:`api.Script.completions`. They provide additional information about a completion. """ - def __init__(self, evaluator, name, needs_dot, like_name_length): + def __init__(self, evaluator, name, stack, like_name_length): super(Completion, self).__init__(evaluator, name) - self._needs_dot = needs_dot self._like_name_length = like_name_length + self._stack = stack # Completion objects with the same Completion name (which means # duplicate items in the completion) self._same_name_completions = [] def _complete(self, like_name): - dot = '.' if self._needs_dot else '' append = '' if settings.add_bracket_after_function \ and self.type == 'Function': append = '(' - if settings.add_dot_after_module: - if isinstance(self._definition, tree.Module): - append += '.' 
- if isinstance(self._definition, tree.Param): - append += '=' + if isinstance(self._name, ParamName) and self._stack is not None: + node_names = list(self._stack.get_node_names(self._evaluator.grammar._pgen_grammar)) + if 'trailer' in node_names and 'argument' not in node_names: + append += '=' - name = str(self._name) + name = self._name.string_name if like_name: name = name[self._like_name_length:] - return dot + name + append + return name + append @property def complete(self): @@ -402,90 +425,51 @@ def complete(self): would return the string 'ce'. It also adds additional stuff, depending on your `settings.py`. + + Assuming the following function definition:: + + def foo(param=0): + pass + + completing ``foo(par`` would give a ``Completion`` which `complete` + would be `am=` + + """ return self._complete(True) @property def name_with_symbols(self): """ - Similar to :attr:`name`, but like :attr:`name` - returns also the symbols, for example:: + Similar to :attr:`name`, but like :attr:`name` returns also the + symbols, for example assuming the following function definition:: - list() + def foo(param=0): + pass + + completing ``foo(`` would give a ``Completion`` which + ``name_with_symbols`` would be "param=". - would return ``.append`` and others (which means it adds a dot). """ return self._complete(False) + def docstring(self, raw=False, fast=True): + if self._like_name_length >= 3: + # In this case we can just resolve the like name, because we + # wouldn't load like > 100 Python modules anymore. + fast = False + return super(Completion, self).docstring(raw=raw, fast=fast) + @property def description(self): """Provide a description of the completion object.""" - if self._definition is None: - return '' - t = self.type - if t == 'statement' or t == 'import': - desc = self._definition.get_code() - else: - desc = '.'.join(unicode(p) for p in self._path()) - - line = '' if self.in_builtin_module else '@%s' % self.line - return '%s: %s%s' % (t, desc, line) + # TODO improve the class structure. + return Definition.description.__get__(self) def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self._name) + return '<%s: %s>' % (type(self).__name__, self._name.string_name) - def docstring(self, raw=False, fast=True): - """ - :param fast: Don't follow imports that are only one level deep like - ``import foo``, but follow ``from foo import bar``. This makes - sense for speed reasons. Completing `import a` is slow if you use - the ``foo.docstring(fast=False)`` on every object, because it - parses all libraries starting with ``a``. - """ - definition = self._definition - if isinstance(definition, tree.Import): - i = imports.ImportWrapper(self._evaluator, self._name) - if len(i.import_path) > 1 or not fast: - followed = self._follow_statements_imports() - if followed: - # TODO: Use all of the followed objects as input to Documentation. - definition = followed[0] - - if raw: - return _Help(definition).raw() - else: - return _Help(definition).full() - - @property - def type(self): - """ - The type of the completion objects. Follows imports. For a further - description, look at :attr:`jedi.api.classes.BaseDefinition.type`. - """ - if isinstance(self._definition, tree.Import): - i = imports.ImportWrapper(self._evaluator, self._name) - if len(i.import_path) <= 1: - return 'module' - - followed = self.follow_definition() - if followed: - # Caveat: Only follows the first one, ignore the other ones. - # This is ok, since people are almost never interested in - # variations. 
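To make the difference between :attr:`complete` and :attr:`name_with_symbols` described above concrete, with the ``def foo(param=0)`` example from the docstrings: ``name_with_symbols`` is the decorated name, ``complete`` is what remains after the typed prefix is removed::

    name_with_symbols = 'param' + '='    # completing `foo(`  -> 'param='
    typed = 'par'                        # the user typed `foo(par`
    assert name_with_symbols[len(typed):] == 'am='   # the `complete` value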
- return followed[0].type - return super(Completion, self).type - - @memoize_default() - def _follow_statements_imports(self): - # imports completion is very complicated and needs to be treated - # separately in Completion. - definition = self._definition - if definition.isinstance(tree.Import): - i = imports.ImportWrapper(self._evaluator, self._name) - return i.follow() - return super(Completion, self)._follow_statements_imports() - - @memoize_default() + @memoize_method def follow_definition(self): """ Return the original definitions. I strongly recommend not using it for @@ -495,11 +479,11 @@ def follow_definition(self): follows all results. This means with 1000 completions (e.g. numpy), it's just PITA-slow. """ - defs = self._follow_statements_imports() + defs = self._name.infer() return [Definition(self._evaluator, d.name) for d in defs] -class Definition(use_metaclass(CachedMetaClass, BaseDefinition)): +class Definition(BaseDefinition): """ *Definition* objects are returned from :meth:`api.Script.goto_assignments` or :meth:`api.Script.goto_definitions`. @@ -535,45 +519,30 @@ def description(self): 'class C' """ - d = self._definition - if isinstance(d, er.InstanceElement): - d = d.var - - if isinstance(d, compiled.CompiledObject): - typ = d.api_type() - if typ == 'instance': - typ = 'class' # The description should be similar to Py objects. - d = typ + ' ' + d.name.get_code() - elif isinstance(d, iterable.Array): - d = 'class ' + d.type - elif isinstance(d, (tree.Class, er.Class, er.Instance)): - d = 'class ' + unicode(d.name) - elif isinstance(d, (er.Function, tree.Function)): - d = 'def ' + unicode(d.name) - elif isinstance(d, tree.Module): - # only show module name - d = 'module %s' % self.module_name - elif isinstance(d, tree.Param): - d = d.get_code().strip() - if d.endswith(','): - d = d[:-1] # Remove the comma. - else: # ExprStmt - try: - first_leaf = d.first_leaf() - except AttributeError: - # `d` is already a Leaf (Name). - first_leaf = d - # Remove the prefix, because that's not what we want for get_code - # here. - old, first_leaf.prefix = first_leaf.prefix, '' - try: - d = d.get_code() - finally: - first_leaf.prefix = old + typ = self.type + tree_name = self._name.tree_name + if typ in ('function', 'class', 'module', 'instance') or tree_name is None: + if typ == 'function': + # For the description we want a short and a pythonic way. + typ = 'def' + return typ + ' ' + u(self._name.string_name) + elif typ == 'param': + code = search_ancestor(tree_name, 'param').get_code( + include_prefix=False, + include_comma=False + ) + return typ + ' ' + code + + + definition = tree_name.get_definition() or tree_name + # Remove the prefix, because that's not what we want for get_code + # here. + txt = definition.get_code(include_prefix=False) # Delete comments: - d = re.sub('#[^\n]+\n', ' ', d) + txt = re.sub('#[^\n]+\n', ' ', txt) # Delete multi spaces/newlines - return re.sub('\s+', ' ', d).strip() + txt = re.sub('\s+', ' ', txt).strip() + return txt @property def desc_with_module(self): @@ -589,26 +558,28 @@ def desc_with_module(self): position = '' if self.in_builtin_module else '@%s' % (self.line) return "%s:%s%s" % (self.module_name, self.description, position) - @memoize_default() + @memoize_method def defined_names(self): """ List sub-definitions (e.g., methods in class). :rtype: list of Definition """ - defs = self._follow_statements_imports() - # For now we don't want base classes or evaluate decorators. 
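The two ``re.sub`` calls at the end of :attr:`description` above collapse a multi-line definition into one line by deleting comments and squeezing whitespace. The same transformation on its own::

    import re

    txt = 'x = [1,  # first\n     2]'
    txt = re.sub(r'#[^\n]+\n', ' ', txt)    # delete comments
    txt = re.sub(r'\s+', ' ', txt).strip()  # collapse spaces/newlines
    assert txt == 'x = [1, 2]'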
- defs = [d.base if isinstance(d, (er.Class, er.Function)) else d for d in defs] - iterable = (defined_names(self._evaluator, d) for d in defs) - iterable = list(iterable) - return list(chain.from_iterable(iterable)) + defs = self._name.infer() + return sorted( + unite(defined_names(self._evaluator, d) for d in defs), + key=lambda s: s._name.start_pos or (0, 0) + ) def is_definition(self): """ Returns True, if defined as a name in a statement, function or class. Returns False, if it's a reference to such a definition. """ - return self._name.is_definition() + if self._name.tree_name is None: + return True + else: + return self._name.tree_name.is_definition() def __eq__(self, other): return self._name.start_pos == other._name.start_pos \ @@ -629,11 +600,11 @@ class CallSignature(Definition): It knows what functions you are currently in. e.g. `isinstance(` would return the `isinstance` function. without `(` it would return nothing. """ - def __init__(self, evaluator, executable_name, call_stmt, index, key_name): + def __init__(self, evaluator, executable_name, bracket_start_pos, index, key_name_str): super(CallSignature, self).__init__(evaluator, executable_name) self._index = index - self._key_name = key_name - self._call_stmt = call_stmt + self._key_name_str = key_name_str + self._bracket_start_pos = bracket_start_pos @property def index(self): @@ -641,21 +612,24 @@ def index(self): The Param index of the current call. Returns None if the index cannot be found in the curent call. """ - if self._key_name is not None: + if self._key_name_str is not None: for i, param in enumerate(self.params): - if self._key_name == param.name: + if self._key_name_str == param.name: return i - if self.params and self.params[-1]._name.get_definition().stars == 2: - return i - else: - return None + if self.params: + param_name = self.params[-1]._name + if param_name.tree_name is not None: + if param_name.tree_name.get_definition().star_count == 2: + return i + return None if self._index >= len(self.params): - for i, param in enumerate(self.params): - # *args case - if param._name.get_definition().stars == 1: - return i + tree_name = param._name.tree_name + if tree_name is not None: + # *args case + if tree_name.get_definition().star_count == 1: + return i return None return self._index @@ -665,48 +639,11 @@ def bracket_start(self): The indent of the bracket that is responsible for the last function call. """ - return self._call_stmt.end_pos - - @property - def call_name(self): - """ - .. deprecated:: 0.8.0 - Use :attr:`.name` instead. - .. todo:: Remove! - - The name (e.g. 'isinstance') as a string. - """ - warnings.warn("Use name instead.", DeprecationWarning) - return unicode(self.name) - - @property - def module(self): - """ - .. deprecated:: 0.8.0 - Use :attr:`.module_name` for the module name. - .. todo:: Remove! - """ - return self._executable.get_parent_until() + return self._bracket_start_pos def __repr__(self): - return '<%s: %s index %s>' % (type(self).__name__, self._name, - self.index) - - -class _Param(Definition): - """ - Just here for backwards compatibility. - """ - def get_code(self): - """ - .. deprecated:: 0.8.0 - Use :attr:`.description` and :attr:`.name` instead. - .. todo:: Remove! - - A function to get the whole code of the param. 
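``CallSignature.index`` above resolves the active parameter in three steps: a keyword argument wins by name (or falls into ``**kwargs``), an overflowing positional index falls back to a ``*args`` slot, and otherwise the positional index is used as-is. A hedged sketch over ``(name, star_count)`` pairs instead of tree nodes::

    def call_index(params, positional_index, keyword=None):
        # params: list of (name, star_count), e.g. ('args', 1) for *args.
        if keyword is not None:
            for i, (name, star_count) in enumerate(params):
                if name == keyword:
                    return i
            if params and params[-1][1] == 2:   # unknown kwarg -> **kwargs
                return len(params) - 1
            return None
        if positional_index >= len(params):
            for i, (name, star_count) in enumerate(params):
                if star_count == 1:             # *args swallows the overflow
                    return i
            return None
        return positional_index

    assert call_index([('a', 0), ('args', 1)], 5) == 1
    assert call_index([('a', 0), ('kwargs', 2)], 0, keyword='x') == 1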
- """ - warnings.warn("Use description instead.", DeprecationWarning) - return self.description + return '<%s: %s index %s>' % \ + (type(self).__name__, self._name.string_name, self.index) class _Help(object): @@ -717,19 +654,25 @@ class _Help(object): def __init__(self, definition): self._name = definition - def full(self): - try: - return self._name.doc - except AttributeError: - return self.raw() + @memoize_method + def _get_contexts(self, fast): + if isinstance(self._name, ImportName) and fast: + return {} + + if self._name.api_type == 'statement': + return {} - def raw(self): + return self._name.infer() + + def docstring(self, fast=True, raw=True): """ - The raw docstring ``__doc__`` for any object. + The docstring ``__doc__`` for any object. See :attr:`doc` for example. """ - try: - return self._name.raw_doc - except AttributeError: - return '' + # TODO: Use all of the followed objects as output. Possibly divinding + # them by a few dashes. + for context in self._get_contexts(fast=fast): + return context.py__doc__(include_call_signature=not raw) + + return '' diff --git a/pythonFiles/release/jedi/api/completion.py b/pythonFiles/release/jedi/api/completion.py new file mode 100644 index 000000000000..559a4d3f8320 --- /dev/null +++ b/pythonFiles/release/jedi/api/completion.py @@ -0,0 +1,291 @@ +from parso.python import token +from parso.python import tree +from parso.tree import search_ancestor, Leaf + +from jedi import debug +from jedi import settings +from jedi.api import classes +from jedi.api import helpers +from jedi.evaluate import imports +from jedi.api import keywords +from jedi.evaluate.helpers import evaluate_call_of_leaf +from jedi.evaluate.filters import get_global_filters +from jedi.parser_utils import get_statement_of_position + + +def get_call_signature_param_names(call_signatures): + # add named params + for call_sig in call_signatures: + for p in call_sig.params: + # Allow protected access, because it's a public API. + tree_name = p._name.tree_name + # Compiled modules typically don't allow keyword arguments. + if tree_name is not None: + # Allow access on _definition here, because it's a + # public API and we don't want to make the internal + # Name object public. + tree_param = tree.search_ancestor(tree_name, 'param') + if tree_param.star_count == 0: # no *args/**kwargs + yield p._name + + +def filter_names(evaluator, completion_names, stack, like_name): + comp_dct = {} + for name in completion_names: + if settings.case_insensitive_completion \ + and name.string_name.lower().startswith(like_name.lower()) \ + or name.string_name.startswith(like_name): + + new = classes.Completion( + evaluator, + name, + stack, + len(like_name) + ) + k = (new.name, new.complete) # key + if k in comp_dct and settings.no_completion_duplicates: + comp_dct[k]._same_name_completions.append(new) + else: + comp_dct[k] = new + yield new + + +def get_user_scope(module_context, position): + """ + Returns the scope in which the user resides. This includes flows. 
+ """ + user_stmt = get_statement_of_position(module_context.tree_node, position) + if user_stmt is None: + def scan(scope): + for s in scope.children: + if s.start_pos <= position <= s.end_pos: + if isinstance(s, (tree.Scope, tree.Flow)): + return scan(s) or s + elif s.type in ('suite', 'decorated'): + return scan(s) + return None + + scanned_node = scan(module_context.tree_node) + if scanned_node: + return module_context.create_context(scanned_node, node_is_context=True) + return module_context + else: + return module_context.create_context(user_stmt) + + +def get_flow_scope_node(module_node, position): + node = module_node.get_leaf_for_position(position, include_prefixes=True) + while not isinstance(node, (tree.Scope, tree.Flow)): + node = node.parent + + return node + + +class Completion: + def __init__(self, evaluator, module, code_lines, position, call_signatures_method): + self._evaluator = evaluator + self._module_context = module + self._module_node = module.tree_node + self._code_lines = code_lines + + # The first step of completions is to get the name + self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position) + # The actual cursor position is not what we need to calculate + # everything. We want the start of the name we're on. + self._position = position[0], position[1] - len(self._like_name) + self._call_signatures_method = call_signatures_method + + def completions(self): + completion_names = self._get_context_completions() + + completions = filter_names(self._evaluator, completion_names, + self.stack, self._like_name) + + return sorted(completions, key=lambda x: (x.name.startswith('__'), + x.name.startswith('_'), + x.name.lower())) + + def _get_context_completions(self): + """ + Analyzes the context that a completion is made in and decides what to + return. + + Technically this works by generating a parser stack and analysing the + current stack for possible grammar nodes. + + Possible enhancements: + - global/nonlocal search global + - yield from / raise from <- could be only exceptions/generators + - In args: */**: no completion + - In params (also lambda): no completion before = + """ + + grammar = self._evaluator.grammar + + try: + self.stack = helpers.get_stack_at_position( + grammar, self._code_lines, self._module_node, self._position + ) + except helpers.OnErrorLeaf as e: + self.stack = None + if e.error_leaf.value == '.': + # After ErrorLeaf's that are dots, we will not do any + # completions since this probably just confuses the user. + return [] + # If we don't have a context, just use global completion. 
+ + return self._global_completions() + + allowed_keywords, allowed_tokens = \ + helpers.get_possible_completion_types(grammar._pgen_grammar, self.stack) + + if 'if' in allowed_keywords: + leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True) + previous_leaf = leaf.get_previous_leaf() + + indent = self._position[1] + if not (leaf.start_pos <= self._position <= leaf.end_pos): + indent = leaf.start_pos[1] + + if previous_leaf is not None: + stmt = previous_leaf + while True: + stmt = search_ancestor( + stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt', + 'error_node', + ) + if stmt is None: + break + + type_ = stmt.type + if type_ == 'error_node': + first = stmt.children[0] + if isinstance(first, Leaf): + type_ = first.value + '_stmt' + # Compare indents + if stmt.start_pos[1] == indent: + if type_ == 'if_stmt': + allowed_keywords += ['elif', 'else'] + elif type_ == 'try_stmt': + allowed_keywords += ['except', 'finally', 'else'] + elif type_ == 'for_stmt': + allowed_keywords.append('else') + + completion_names = list(self._get_keyword_completion_names(allowed_keywords)) + + if token.NAME in allowed_tokens or token.INDENT in allowed_tokens: + # This means that we actually have to do type inference. + + symbol_names = list(self.stack.get_node_names(grammar._pgen_grammar)) + + nodes = list(self.stack.get_nodes()) + + if nodes and nodes[-1] in ('as', 'def', 'class'): + # No completions for ``with x as foo`` and ``import x as foo``. + # Also true for defining names as a class or function. + return list(self._get_class_context_completions(is_function=True)) + elif "import_stmt" in symbol_names: + level, names = self._parse_dotted_names(nodes, "import_from" in symbol_names) + + only_modules = not ("import_from" in symbol_names and 'import' in nodes) + completion_names += self._get_importer_names( + names, + level, + only_modules=only_modules, + ) + elif symbol_names[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.': + dot = self._module_node.get_leaf_for_position(self._position) + completion_names += self._trailer_completions(dot.get_previous_leaf()) + else: + completion_names += self._global_completions() + completion_names += self._get_class_context_completions(is_function=False) + + if 'trailer' in symbol_names: + call_signatures = self._call_signatures_method() + completion_names += get_call_signature_param_names(call_signatures) + + return completion_names + + def _get_keyword_completion_names(self, keywords_): + for k in keywords_: + yield keywords.keyword(self._evaluator, k).name + + def _global_completions(self): + context = get_user_scope(self._module_context, self._position) + debug.dbg('global completion scope: %s', context) + flow_scope_node = get_flow_scope_node(self._module_node, self._position) + filters = get_global_filters( + self._evaluator, + context, + self._position, + origin_scope=flow_scope_node + ) + completion_names = [] + for filter in filters: + completion_names += filter.values() + return completion_names + + def _trailer_completions(self, previous_leaf): + user_context = get_user_scope(self._module_context, self._position) + evaluation_context = self._evaluator.create_context( + self._module_context, previous_leaf + ) + contexts = evaluate_call_of_leaf(evaluation_context, previous_leaf) + completion_names = [] + debug.dbg('trailer completion contexts: %s', contexts) + for context in contexts: + for filter in context.get_filters( + search_global=False, origin_scope=user_context.tree_node): + completion_names += filter.values() + 
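The branch above that extends ``allowed_keywords`` only fires when an open compound statement is found at the same indentation; the statement-to-keyword mapping itself is small. Restated as a table (an illustration of the branches above, not an exported constant)::

    FOLLOW_UP_KEYWORDS = {
        'if_stmt': ['elif', 'else'],
        'try_stmt': ['except', 'finally', 'else'],
        'for_stmt': ['else'],
    }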
return completion_names + + def _parse_dotted_names(self, nodes, is_import_from): + level = 0 + names = [] + for node in nodes[1:]: + if node in ('.', '...'): + if not names: + level += len(node.value) + elif node.type == 'dotted_name': + names += node.children[::2] + elif node.type == 'name': + names.append(node) + elif node == ',': + if not is_import_from: + names = [] + else: + # Here if the keyword `import` comes along it stops checking + # for names. + break + return level, names + + def _get_importer_names(self, names, level=0, only_modules=True): + names = [n.value for n in names] + i = imports.Importer(self._evaluator, names, self._module_context, level) + return i.completion_names(self._evaluator, only_modules=only_modules) + + def _get_class_context_completions(self, is_function=True): + """ + Autocomplete inherited methods when overriding in child class. + """ + leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True) + cls = tree.search_ancestor(leaf, 'classdef') + if isinstance(cls, (tree.Class, tree.Function)): + # Complete the methods that are defined in the super classes. + random_context = self._module_context.create_context( + cls, + node_is_context=True + ) + else: + return + + if cls.start_pos[1] >= leaf.start_pos[1]: + return + + filters = random_context.get_filters(search_global=False, is_instance=True) + # The first dict is the dictionary of class itself. + next(filters) + for filter in filters: + for name in filter.values(): + if (name.api_type == 'function') == is_function: + yield name diff --git a/pythonFiles/release/jedi/api/helpers.py b/pythonFiles/release/jedi/api/helpers.py index b1b3f6e441c9..2c4d8e0d10fc 100755 --- a/pythonFiles/release/jedi/api/helpers.py +++ b/pythonFiles/release/jedi/api/helpers.py @@ -2,18 +2,20 @@ Helpers for the API """ import re +from collections import namedtuple +from textwrap import dedent -from jedi.parser import tree as pt -from jedi.evaluate import imports +from parso.python.parser import Parser +from parso.python import tree +from parso import split_lines +from jedi._compatibility import u +from jedi.evaluate.syntax_tree import eval_atom +from jedi.evaluate.helpers import evaluate_call_of_leaf +from jedi.cache import time_cache -def completion_parts(path_until_cursor): - """ - Returns the parts for the completion - :return: tuple - (path, dot, like) - """ - match = re.match(r'^(.*?)(\.|)(\w?[\w\d]*)$', path_until_cursor, flags=re.S) - return match.groups() + +CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name']) def sorted_definitions(defs): @@ -21,58 +23,294 @@ def sorted_definitions(defs): return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0)) -def get_on_import_stmt(evaluator, user_context, user_stmt, is_like_search=False): +def get_on_completion_name(module_node, lines, position): + leaf = module_node.get_leaf_for_position(position) + if leaf is None or leaf.type in ('string', 'error_leaf'): + # Completions inside strings are a bit special, we need to parse the + # string. The same is true for comments and error_leafs. + line = lines[position[0] - 1] + # The first step of completions is to get the name + return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0) + elif leaf.type not in ('name', 'keyword'): + return '' + + return leaf.value[:position[1] - leaf.start_pos[1]] + + +def _get_code(code_lines, start_pos, end_pos): + # Get relevant lines. 
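``_parse_dotted_names`` above reads an import statement's nodes left to right: leading dots raise the relative level, names accumulate afterwards, and ``import`` cuts the scan short. A simplification over plain strings instead of parser nodes::

    def parse_dotted(tokens):
        level, names = 0, []
        for tok in tokens:
            if tok in ('.', '...'):
                if not names:
                    level += len(tok)   # `...` counts as three levels
            elif tok != 'import':
                names.append(tok)
        return level, names

    # `from ..pkg.mod import x` -> level 2, names ['pkg', 'mod']
    assert parse_dotted(['.', '.', 'pkg', 'mod']) == (2, ['pkg', 'mod'])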
+ lines = code_lines[start_pos[0] - 1:end_pos[0]] + # Remove the parts at the end of the line. + lines[-1] = lines[-1][:end_pos[1]] + # Remove first line indentation. + lines[0] = lines[0][start_pos[1]:] + return '\n'.join(lines) + + +class OnErrorLeaf(Exception): + @property + def error_leaf(self): + return self.args[0] + + +def _is_on_comment(leaf, position): + comment_lines = split_lines(leaf.prefix) + difference = leaf.start_pos[0] - position[0] + prefix_start_pos = leaf.get_start_pos_of_prefix() + if difference == 0: + indent = leaf.start_pos[1] + elif position[0] == prefix_start_pos[0]: + indent = prefix_start_pos[1] + else: + indent = 0 + line = comment_lines[-difference - 1][:position[1] - indent] + return '#' in line + + +def _get_code_for_stack(code_lines, module_node, position): + leaf = module_node.get_leaf_for_position(position, include_prefixes=True) + # It might happen that we're on whitespace or on a comment. This means + # that we would not get the right leaf. + if leaf.start_pos >= position: + if _is_on_comment(leaf, position): + return u('') + + # If we're not on a comment simply get the previous leaf and proceed. + leaf = leaf.get_previous_leaf() + if leaf is None: + return u('') # At the beginning of the file. + + is_after_newline = leaf.type == 'newline' + while leaf.type == 'newline': + leaf = leaf.get_previous_leaf() + if leaf is None: + return u('') + + if leaf.type == 'error_leaf' or leaf.type == 'string': + if leaf.start_pos[0] < position[0]: + # On a different line, we just begin anew. + return u('') + + # Error leafs cannot be parsed, completion in strings is also + # impossible. + raise OnErrorLeaf(leaf) + else: + user_stmt = leaf + while True: + if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'): + break + user_stmt = user_stmt.parent + + if is_after_newline: + if user_stmt.start_pos[1] > position[1]: + # This means that it's actually a dedent and that means that we + # start without context (part of a suite). + return u('') + + # This is basically getting the relevant lines. + return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position) + + +def get_stack_at_position(grammar, code_lines, module_node, pos): """ - Resolve the user statement, if it is an import. Only resolve the - parts until the user position. + Returns the possible node names (e.g. import_from, xor_test or yield_stmt). """ - name = user_stmt.name_for_position(user_context.position) - if name is None: - return None, None - - i = imports.ImportWrapper(evaluator, name) - return i, name - - -def check_error_statements(module, pos): - for error_statement in module.error_statement_stacks: - if error_statement.first_type in ('import_from', 'import_name') \ - and error_statement.first_pos < pos <= error_statement.next_start_pos: - return importer_from_error_statement(error_statement, pos) - return None, 0, False, False - - -def importer_from_error_statement(error_statement, pos): - def check_dotted(children): - for name in children[::2]: - if name.start_pos <= pos: - yield name - - names = [] - level = 0 - only_modules = True - unfinished_dotted = False - for typ, nodes in error_statement.stack: - if typ == 'dotted_name': - names += check_dotted(nodes) - if nodes[-1] == '.': - # An unfinished dotted_name - unfinished_dotted = True - elif typ == 'import_name': - if nodes[0].start_pos <= pos <= nodes[0].end_pos: - # We are on the import. 
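The regular expression in ``get_on_completion_name`` above extracts the identifier fragment directly left of the cursor from a raw line: ``(?!\d)\w+$|$`` means "a word not starting with a digit, anchored at the cursor, or the empty string". Applied directly::

    import re

    def on_completion_name(line, column):
        return re.search(r'(?!\d)\w+$|$', line[:column]).group(0)

    assert on_completion_name('datetime.date.toda', 18) == 'toda'
    assert on_completion_name('x = 3.14', 8) == ''    # numbers never match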
- return None, 0, False, False - elif typ == 'import_from': + class EndMarkerReached(Exception): + pass + + def tokenize_without_endmarker(code): + # TODO This is for now not an official parso API that exists purely + # for Jedi. + tokens = grammar._tokenize(code) + for token_ in tokens: + if token_.string == safeword: + raise EndMarkerReached() + else: + yield token_ + + # The code might be indedented, just remove it. + code = dedent(_get_code_for_stack(code_lines, module_node, pos)) + # We use a word to tell Jedi when we have reached the start of the + # completion. + # Use Z as a prefix because it's not part of a number suffix. + safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI' + code = code + safeword + + p = Parser(grammar._pgen_grammar, error_recovery=True) + try: + p.parse(tokens=tokenize_without_endmarker(code)) + except EndMarkerReached: + return Stack(p.pgen_parser.stack) + raise SystemError("This really shouldn't happen. There's a bug in Jedi.") + + +class Stack(list): + def get_node_names(self, grammar): + for dfa, state, (node_number, nodes) in self: + yield grammar.number2symbol[node_number] + + def get_nodes(self): + for dfa, state, (node_number, nodes) in self: for node in nodes: - if node.start_pos >= pos: - break - elif isinstance(node, pt.Node) and node.type == 'dotted_name': - names += check_dotted(node.children) - elif node in ('.', '...'): - level += len(node.value) - elif isinstance(node, pt.Name): - names.append(node) - elif node == 'import': - only_modules = False - - return names, level, only_modules, unfinished_dotted + yield node + + +def get_possible_completion_types(pgen_grammar, stack): + def add_results(label_index): + try: + grammar_labels.append(inversed_tokens[label_index]) + except KeyError: + try: + keywords.append(inversed_keywords[label_index]) + except KeyError: + t, v = pgen_grammar.labels[label_index] + assert t >= 256 + # See if it's a symbol and if we're in its first set + inversed_keywords + itsdfa = pgen_grammar.dfas[t] + itsstates, itsfirst = itsdfa + for first_label_index in itsfirst.keys(): + add_results(first_label_index) + + inversed_keywords = dict((v, k) for k, v in pgen_grammar.keywords.items()) + inversed_tokens = dict((v, k) for k, v in pgen_grammar.tokens.items()) + + keywords = [] + grammar_labels = [] + + def scan_stack(index): + dfa, state, node = stack[index] + states, first = dfa + arcs = states[state] + + for label_index, new_state in arcs: + if label_index == 0: + # An accepting state, check the stack below. + scan_stack(index - 1) + else: + add_results(label_index) + + scan_stack(-1) + + return keywords, grammar_labels + + +def evaluate_goto_definition(evaluator, context, leaf): + if leaf.type == 'name': + # In case of a name we can just use goto_definition which does all the + # magic itself. + return evaluator.goto_definitions(context, leaf) + + parent = leaf.parent + if parent.type == 'atom': + return context.eval_node(leaf.parent) + elif parent.type == 'trailer': + return evaluate_call_of_leaf(context, leaf) + elif isinstance(leaf, tree.Literal): + return eval_atom(context, leaf) + return [] + + +CallSignatureDetails = namedtuple( + 'CallSignatureDetails', + ['bracket_leaf', 'call_index', 'keyword_name_str'] +) + + +def _get_index_and_key(nodes, position): + """ + Returns the amount of commas and the keyword argument string. 
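``get_stack_at_position`` above uses a sentinel trick: an improbable identifier is appended to the code, tokenization is aborted the moment it shows up, and the half-built parser stack is then inspected. The control flow with an ordinary generator (``words`` stands in for parso's token stream, which is not used here)::

    SAFEWORD = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'

    class EndMarkerReached(Exception):
        pass

    def tokens_until_safeword(words):
        for word in words:
            if word == SAFEWORD:
                raise EndMarkerReached()
            yield word

    consumed = []
    try:
        for w in tokens_until_safeword(['import', 'os', SAFEWORD]):
            consumed.append(w)
    except EndMarkerReached:
        pass
    assert consumed == ['import', 'os']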
+ """ + nodes_before = [c for c in nodes if c.start_pos < position] + if nodes_before[-1].type == 'arglist': + nodes_before = [c for c in nodes_before[-1].children if c.start_pos < position] + + key_str = None + + if nodes_before: + last = nodes_before[-1] + if last.type == 'argument' and last.children[1].end_pos <= position: + # Checked if the argument + key_str = last.children[0].value + elif last == '=': + key_str = nodes_before[-2].value + + return nodes_before.count(','), key_str + + +def _get_call_signature_details_from_error_node(node, position): + for index, element in reversed(list(enumerate(node.children))): + # `index > 0` means that it's a trailer and not an atom. + if element == '(' and element.end_pos <= position and index > 0: + # It's an error node, we don't want to match too much, just + # until the parentheses is enough. + children = node.children[index:] + name = element.get_previous_leaf() + if name is None: + continue + if name.type == 'name' or name.parent.type in ('trailer', 'atom'): + return CallSignatureDetails( + element, + *_get_index_and_key(children, position) + ) + + +def get_call_signature_details(module, position): + leaf = module.get_leaf_for_position(position, include_prefixes=True) + if leaf.start_pos >= position: + # Whitespace / comments after the leaf count towards the previous leaf. + leaf = leaf.get_previous_leaf() + if leaf is None: + return None + + if leaf == ')': + if leaf.end_pos == position: + leaf = leaf.get_next_leaf() + + # Now that we know where we are in the syntax tree, we start to look at + # parents for possible function definitions. + node = leaf.parent + while node is not None: + if node.type in ('funcdef', 'classdef'): + # Don't show call signatures if there's stuff before it that just + # makes it feel strange to have a call signature. + return None + + for n in node.children[::-1]: + if n.start_pos < position and n.type == 'error_node': + result = _get_call_signature_details_from_error_node(n, position) + if result is not None: + return result + + if node.type == 'trailer' and node.children[0] == '(': + leaf = node.get_previous_leaf() + if leaf is None: + return None + return CallSignatureDetails( + node.children[0], *_get_index_and_key(node.children, position)) + + node = node.parent + + return None + + +@time_cache("call_signatures_validity") +def cache_call_signatures(evaluator, context, bracket_leaf, code_lines, user_pos): + """This function calculates the cache key.""" + index = user_pos[0] - 1 + + before_cursor = code_lines[index][:user_pos[1]] + other_lines = code_lines[bracket_leaf.start_pos[0]:index] + whole = '\n'.join(other_lines + [before_cursor]) + before_bracket = re.match(r'.*\(', whole, re.DOTALL) + + module_path = context.get_root_context().py__file__() + if module_path is None: + yield None # Don't cache! + else: + yield (module_path, before_bracket, bracket_leaf.start_pos) + yield evaluate_goto_definition( + evaluator, + context, + bracket_leaf.get_previous_leaf() + ) diff --git a/pythonFiles/release/jedi/api/interpreter.py b/pythonFiles/release/jedi/api/interpreter.py index 595435c61589..202f345e94b9 100755 --- a/pythonFiles/release/jedi/api/interpreter.py +++ b/pythonFiles/release/jedi/api/interpreter.py @@ -1,109 +1,47 @@ """ TODO Some parts of this module are still not well documented. 
""" -import inspect -import re -from jedi._compatibility import builtins -from jedi import debug -from jedi.common import source_to_unicode -from jedi.cache import underscore_memoization +from jedi.evaluate.context import ModuleContext from jedi.evaluate import compiled -from jedi.evaluate.compiled.fake import get_module -from jedi.parser import tree as pt -from jedi.parser import load_grammar -from jedi.parser.fast import FastParser -from jedi.evaluate import helpers -from jedi.evaluate import iterable -from jedi.evaluate import representation as er +from jedi.evaluate.compiled import mixed +from jedi.evaluate.base_context import Context -def add_namespaces_to_parser(evaluator, namespaces, parser_module): - for namespace in namespaces: - for key, value in namespace.items(): - # Name lookups in an ast tree work by checking names_dict. - # Therefore we just add fake names to that and we're done. - arr = parser_module.names_dict.setdefault(key, []) - arr.append(LazyName(evaluator, parser_module, key, value)) +class NamespaceObject(object): + def __init__(self, dct): + self.__dict__ = dct -class LazyName(helpers.FakeName): - def __init__(self, evaluator, module, name, value): - super(LazyName, self).__init__(name) - self._module = module - self._evaluator = evaluator - self._value = value - self._name = name +class MixedModuleContext(Context): + resets_positions = True + type = 'mixed_module' - def is_definition(self): - return True + def __init__(self, evaluator, tree_module, namespaces, path): + self.evaluator = evaluator + self._namespaces = namespaces - @property - @underscore_memoization - def parent(self): - """ - Creating fake statements for the interpreter. - """ - obj = self._value - parser_path = [] - if inspect.ismodule(obj): - module = obj - else: - names = [] - try: - o = obj.__objclass__ - names.append(obj.__name__) - obj = o - except AttributeError: - pass + self._namespace_objects = [NamespaceObject(n) for n in namespaces] + self._module_context = ModuleContext(evaluator, tree_module, path=path) + self.tree_node = tree_module - try: - module_name = obj.__module__ - names.insert(0, obj.__name__) - except AttributeError: - # Unfortunately in some cases like `int` there's no __module__ - module = builtins - else: - # TODO this import is wrong. 
Yields x for x.y.z instead of z - module = __import__(module_name) - parser_path = names - raw_module = get_module(self._value) + def get_node(self): + return self.tree_node - found = [] - try: - path = module.__file__ - except AttributeError: - pass - else: - path = re.sub('c$', '', path) - if path.endswith('.py'): - # cut the `c` from `.pyc` - with open(path) as f: - source = source_to_unicode(f.read()) - mod = FastParser(load_grammar(), source, path[:-1]).module - if parser_path: - assert len(parser_path) == 1 - found = self._evaluator.find_types(mod, parser_path[0], search_global=True) - else: - found = [self._evaluator.wrap(mod)] + def get_filters(self, *args, **kwargs): + for filter in self._module_context.get_filters(*args, **kwargs): + yield filter - if not found: - debug.warning('Possibly an interpreter lookup for Python code failed %s', - parser_path) + for namespace_obj in self._namespace_objects: + compiled_object = compiled.create(self.evaluator, namespace_obj) + mixed_object = mixed.MixedObject( + self.evaluator, + parent_context=self, + compiled_object=compiled_object, + tree_context=self._module_context + ) + for filter in mixed_object.get_filters(*args, **kwargs): + yield filter - if not found: - evaluated = compiled.CompiledObject(obj) - if evaluated == builtins: - # The builtins module is special and always cached. - evaluated = compiled.builtin - found = [evaluated] - - content = iterable.AlreadyEvaluated(found) - stmt = pt.ExprStmt([self, pt.Operator(pt.zero_position_modifier, - '=', (0, 0), ''), content]) - stmt.parent = self._module - return stmt - - @parent.setter - def parent(self, value): - """Needed because the super class tries to set parent.""" + def __getattr__(self, name): + return getattr(self._module_context, name) diff --git a/pythonFiles/release/jedi/api/keywords.py b/pythonFiles/release/jedi/api/keywords.py index 2a54ba2d5817..a1bc4e7f8556 100755 --- a/pythonFiles/release/jedi/api/keywords.py +++ b/pythonFiles/release/jedi/api/keywords.py @@ -1,56 +1,111 @@ import pydoc import keyword -from jedi._compatibility import is_py3 -from jedi import common -from jedi.evaluate import compiled -from jedi.evaluate.helpers import FakeName +from jedi._compatibility import is_py3, is_py35 +from jedi.evaluate.utils import ignored +from jedi.evaluate.filters import AbstractNameDefinition +from parso.python.tree import Leaf try: from pydoc_data import topics as pydoc_topics except ImportError: - # Python 2.6 - import pydoc_topics + # Python 2 + try: + import pydoc_topics + except ImportError: + # This is for Python 3 embeddable version, which dont have + # pydoc_data module in its file python3x.zip. 
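The ``__getattr__`` fallback above makes ``MixedModuleContext`` a transparent proxy: any attribute it does not define itself is looked up on the wrapped module context. The delegation pattern in isolation (``Proxy``/``Module`` are illustrative stand-ins)::

    class Proxy(object):
        def __init__(self, wrapped):
            self._wrapped = wrapped

        def __getattr__(self, name):
            # Only consulted when normal attribute lookup fails.
            return getattr(self._wrapped, name)

    class Module(object):
        name = 'os'

    assert Proxy(Module()).name == 'os'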
+ pydoc_topics = None if is_py3: - keys = keyword.kwlist + if is_py35: + # in python 3.5 async and await are not proper keywords, but for + # completion pursposes should as as though they are + keys = keyword.kwlist + ["async", "await"] + else: + keys = keyword.kwlist else: keys = keyword.kwlist + ['None', 'False', 'True'] -def keywords(string='', pos=(0, 0), all=False): - if all: - return set([Keyword(k, pos) for k in keys]) +def has_inappropriate_leaf_keyword(pos, module): + relevant_errors = filter( + lambda error: error.first_pos[0] == pos[0], + module.error_statement_stacks) + + for error in relevant_errors: + if error.next_token in keys: + return True + + return False + + +def completion_names(evaluator, stmt, pos, module): + keyword_list = all_keywords(evaluator) + + if not isinstance(stmt, Leaf) or has_inappropriate_leaf_keyword(pos, module): + keyword_list = filter( + lambda keyword: not keyword.only_valid_as_leaf, + keyword_list + ) + return [keyword.name for keyword in keyword_list] + + +def all_keywords(evaluator, pos=(0, 0)): + return set([Keyword(evaluator, k, pos) for k in keys]) + + +def keyword(evaluator, string, pos=(0, 0)): if string in keys: - return set([Keyword(string, pos)]) - return set() + return Keyword(evaluator, string, pos) + else: + return None -def keyword_names(*args, **kwargs): - return [k.name for k in keywords(*args, **kwargs)] +def get_operator(evaluator, string, pos): + return Keyword(evaluator, string, pos) -def get_operator(string, pos): - return Keyword(string, pos) +keywords_only_valid_as_leaf = ( + 'continue', + 'break', +) + + +class KeywordName(AbstractNameDefinition): + api_type = 'keyword' + + def __init__(self, evaluator, name): + self.evaluator = evaluator + self.string_name = name + self.parent_context = evaluator.BUILTINS + + def eval(self): + return set() + + def infer(self): + return [Keyword(self.evaluator, self.string_name, (0, 0))] class Keyword(object): - def __init__(self, name, pos): - self.name = FakeName(name, self, pos) + api_type = 'keyword' + + def __init__(self, evaluator, name, pos): + self.name = KeywordName(evaluator, name) self.start_pos = pos - self.parent = compiled.builtin + self.parent = evaluator.BUILTINS - def get_parent_until(self): - return self.parent + @property + def only_valid_as_leaf(self): + return self.name.value in keywords_only_valid_as_leaf @property def names(self): """ For a `parsing.Name` like comparision """ return [self.name] - @property - def docstr(self): - return imitate_pydoc(self.name) + def py__doc__(self, include_call_signature=False): + return imitate_pydoc(self.name.string_name) def __repr__(self): return '<%s: %s>' % (type(self).__name__, self.name) @@ -61,11 +116,14 @@ def imitate_pydoc(string): It's not possible to get the pydoc's without starting the annoying pager stuff. 
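The version switch above assembles the completion keyword list from the stdlib; on Python 3.5 ``async`` and ``await`` were not yet real keywords (they became so in 3.7), so they are appended manually. Against the running interpreter::

    import keyword
    import sys

    keys = list(keyword.kwlist)
    if sys.version_info[:2] == (3, 5):
        keys += ['async', 'await']   # soft keywords in 3.5

    assert 'if' in keys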
""" + if pydoc_topics is None: + return '' + # str needed because of possible unicode stuff in py2k (pydoc doesn't work # with unicode strings) string = str(string) h = pydoc.help - with common.ignored(KeyError): + with ignored(KeyError): # try to access symbols string = h.symbols[string] string, _, related = string.partition(' ') @@ -81,6 +139,6 @@ def imitate_pydoc(string): return '' try: - return pydoc_topics.topics[label] if pydoc_topics else '' + return pydoc_topics.topics[label].strip() if pydoc_topics else '' except KeyError: return '' diff --git a/pythonFiles/release/jedi/api/usages.py b/pythonFiles/release/jedi/api/usages.py deleted file mode 100755 index ecb885639032..000000000000 --- a/pythonFiles/release/jedi/api/usages.py +++ /dev/null @@ -1,49 +0,0 @@ -from jedi._compatibility import unicode -from jedi.api import classes -from jedi.parser import tree -from jedi.evaluate import imports - - -def usages(evaluator, definition_names, mods): - """ - :param definitions: list of Name - """ - def compare_array(definitions): - """ `definitions` are being compared by module/start_pos, because - sometimes the id's of the objects change (e.g. executions). - """ - result = [] - for d in definitions: - module = d.get_parent_until() - result.append((module, d.start_pos)) - return result - - search_name = unicode(list(definition_names)[0]) - compare_definitions = compare_array(definition_names) - mods |= set([d.get_parent_until() for d in definition_names]) - definitions = [] - for m in imports.get_modules_containing_name(evaluator, mods, search_name): - try: - check_names = m.used_names[search_name] - except KeyError: - continue - for name in check_names: - - result = evaluator.goto(name) - if [c for c in compare_array(result) if c in compare_definitions]: - definitions.append(classes.Definition(evaluator, name)) - # Previous definitions might be imports, so include them - # (because goto might return that import name). - compare_definitions += compare_array([name]) - return definitions - - -def usages_add_import_modules(evaluator, definitions): - """ Adds the modules of the imports """ - new = set() - for d in definitions: - imp_or_stmt = d.get_definition() - if isinstance(imp_or_stmt, tree.Import): - s = imports.ImportWrapper(evaluator, d) - new |= set(s.follow(is_goto=True)) - return set(definitions) | new diff --git a/pythonFiles/release/jedi/cache.py b/pythonFiles/release/jedi/cache.py index 56769d0d8953..01138e75a99b 100755 --- a/pythonFiles/release/jedi/cache.py +++ b/pythonFiles/release/jedi/cache.py @@ -3,8 +3,6 @@ nothing really spectacular, just some decorators. The following cache types are available: -- module caching (`load_parser` and `save_parser`), which uses pickle and is - really important to assure low load times of modules like ``numpy``. - ``time_cache`` can be used to cache something for just a limited time span, which can be useful if there's user interaction and the user cannot react faster than a certain time. @@ -14,35 +12,43 @@ these variables are being cleaned after every API usage. 
""" import time -import os -import sys -import json -import hashlib -import gc -import inspect -import shutil -import re -try: - import cPickle as pickle -except ImportError: - import pickle from jedi import settings -from jedi import common -from jedi import debug +from parso.cache import parser_cache _time_caches = {} -# for fast_parser, should not be deleted -parser_cache = {} +def underscore_memoization(func): + """ + Decorator for methods:: -class ParserCacheItem(object): - def __init__(self, parser, change_time=None): - self.parser = parser - if change_time is None: - change_time = time.time() - self.change_time = change_time + class A(object): + def x(self): + if self._x: + self._x = 10 + return self._x + + Becomes:: + + class A(object): + @underscore_memoization + def x(self): + return 10 + + A now has an attribute ``_x`` written by this decorator. + """ + name = '_' + func.__name__ + + def wrapper(self): + try: + return getattr(self, name) + except AttributeError: + result = func(self) + setattr(self, name, result) + return result + + return wrapper def clear_time_caches(delete_all=False): @@ -70,11 +76,12 @@ def clear_time_caches(delete_all=False): def time_cache(time_add_setting): """ - s This decorator works as follows: Call it with a setting and after that use the function with a callable that returns the key. But: This function is only called if the key is not available. After a certain amount of time (`time_add_setting`) the cache is invalid. + + If the given key is None, the function will not be cached. """ def _temp(key_func): dct = {} @@ -99,60 +106,11 @@ def wrapper(*args, **kwargs): return _temp -@time_cache("call_signatures_validity") -def cache_call_signatures(evaluator, call, source, user_pos): - """This function calculates the cache key.""" - index = user_pos[0] - 1 - lines = common.splitlines(source) - - before_cursor = lines[index][:user_pos[1]] - other_lines = lines[call.start_pos[0]:index] - whole = '\n'.join(other_lines + [before_cursor]) - before_bracket = re.match(r'.*\(', whole, re.DOTALL) - - module_path = call.get_parent_until().path - yield None if module_path is None else (module_path, before_bracket, call.start_pos) - yield evaluator.eval_element(call) - - -def underscore_memoization(func): - """ - Decorator for methods:: - - class A(object): - def x(self): - if self._x: - self._x = 10 - return self._x - - Becomes:: - - class A(object): - @underscore_memoization - def x(self): - return 10 - - A now has an attribute ``_x`` written by this decorator. 
- """ - name = '_' + func.__name__ - - def wrapper(self): - try: - return getattr(self, name) - except AttributeError: - result = func(self) - if inspect.isgenerator(result): - result = list(result) - setattr(self, name, result) - return result - - return wrapper - - def memoize_method(method): """A normal memoize function.""" def wrapper(self, *args, **kwargs): - dct = self.__dict__.setdefault('_memoize_method_dct', {}) + cache_dict = self.__dict__.setdefault('_memoize_method_dct', {}) + dct = cache_dict.setdefault(method, {}) key = (args, frozenset(kwargs.items())) try: return dct[key] @@ -161,176 +119,3 @@ def wrapper(self, *args, **kwargs): dct[key] = result return result return wrapper - - -def cache_star_import(func): - @time_cache("star_import_cache_validity") - def wrapper(self): - yield self.base # The cache key - yield func(self) - return wrapper - - -def _invalidate_star_import_cache_module(module, only_main=False): - """ Important if some new modules are being reparsed """ - try: - t, modules = _time_caches['star_import_cache_validity'][module] - except KeyError: - pass - else: - del _time_caches['star_import_cache_validity'][module] - - -def invalidate_star_import_cache(path): - """On success returns True.""" - try: - parser_cache_item = parser_cache[path] - except KeyError: - pass - else: - _invalidate_star_import_cache_module(parser_cache_item.parser.module) - - -def load_parser(path): - """ - Returns the module or None, if it fails. - """ - p_time = os.path.getmtime(path) if path else None - try: - parser_cache_item = parser_cache[path] - if not path or p_time <= parser_cache_item.change_time: - return parser_cache_item.parser - else: - # In case there is already a module cached and this module - # has to be reparsed, we also need to invalidate the import - # caches. - _invalidate_star_import_cache_module(parser_cache_item.parser.module) - except KeyError: - if settings.use_filesystem_cache: - return ParserPickling.load_parser(path, p_time) - - -def save_parser(path, parser, pickling=True): - try: - p_time = None if path is None else os.path.getmtime(path) - except OSError: - p_time = None - pickling = False - - item = ParserCacheItem(parser, p_time) - parser_cache[path] = item - if settings.use_filesystem_cache and pickling: - ParserPickling.save_parser(path, item) - - -class ParserPickling(object): - - version = 24 - """ - Version number (integer) for file system cache. - - Increment this number when there are any incompatible changes in - parser representation classes. For example, the following changes - are regarded as incompatible. - - - Class name is changed. - - Class is moved to another module. - - Defined slot of the class is changed. - """ - - def __init__(self): - self.__index = None - self.py_tag = 'cpython-%s%s' % sys.version_info[:2] - """ - Short name for distinguish Python implementations and versions. - - It's like `sys.implementation.cache_tag` but for Python < 3.3 - we generate something similar. See: - http://docs.python.org/3/library/sys.html#sys.implementation - - .. todo:: Detect interpreter (e.g., PyPy). 
- """ - - def load_parser(self, path, original_changed_time): - try: - pickle_changed_time = self._index[path] - except KeyError: - return None - if original_changed_time is not None \ - and pickle_changed_time < original_changed_time: - # the pickle file is outdated - return None - - with open(self._get_hashed_path(path), 'rb') as f: - try: - gc.disable() - parser_cache_item = pickle.load(f) - finally: - gc.enable() - - debug.dbg('pickle loaded: %s', path) - parser_cache[path] = parser_cache_item - return parser_cache_item.parser - - def save_parser(self, path, parser_cache_item): - self.__index = None - try: - files = self._index - except KeyError: - files = {} - self._index = files - - with open(self._get_hashed_path(path), 'wb') as f: - pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL) - files[path] = parser_cache_item.change_time - - self._flush_index() - - @property - def _index(self): - if self.__index is None: - try: - with open(self._get_path('index.json')) as f: - data = json.load(f) - except (IOError, ValueError): - self.__index = {} - else: - # 0 means version is not defined (= always delete cache): - if data.get('version', 0) != self.version: - self.clear_cache() - self.__index = {} - else: - self.__index = data['index'] - return self.__index - - def _remove_old_modules(self): - # TODO use - change = False - if change: - self._flush_index(self) - self._index # reload index - - def _flush_index(self): - data = {'version': self.version, 'index': self._index} - with open(self._get_path('index.json'), 'w') as f: - json.dump(data, f) - self.__index = None - - def clear_cache(self): - shutil.rmtree(self._cache_directory()) - - def _get_hashed_path(self, path): - return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest()) - - def _get_path(self, file): - dir = self._cache_directory() - if not os.path.exists(dir): - os.makedirs(dir) - return os.path.join(dir, file) - - def _cache_directory(self): - return os.path.join(settings.cache_directory, self.py_tag) - - -# is a singleton -ParserPickling = ParserPickling() diff --git a/pythonFiles/release/jedi/common/__init__.py b/pythonFiles/release/jedi/common/__init__.py new file mode 100644 index 000000000000..702a5e609985 --- /dev/null +++ b/pythonFiles/release/jedi/common/__init__.py @@ -0,0 +1 @@ +from jedi.common.context import BaseContextSet, BaseContext diff --git a/pythonFiles/release/jedi/common/context.py b/pythonFiles/release/jedi/common/context.py new file mode 100644 index 000000000000..dfcf5a9e17e6 --- /dev/null +++ b/pythonFiles/release/jedi/common/context.py @@ -0,0 +1,67 @@ +class BaseContext(object): + def __init__(self, evaluator, parent_context=None): + self.evaluator = evaluator + self.parent_context = parent_context + + def get_root_context(self): + context = self + while True: + if context.parent_context is None: + return context + context = context.parent_context + + +class BaseContextSet(object): + def __init__(self, *args): + self._set = set(args) + + @classmethod + def from_iterable(cls, iterable): + return cls.from_set(set(iterable)) + + @classmethod + def from_set(cls, set_): + self = cls() + self._set = set_ + return self + + @classmethod + def from_sets(cls, sets): + """ + Used to work with an iterable of set. 
+ """ + aggregated = set() + sets = list(sets) + for set_ in sets: + if isinstance(set_, BaseContextSet): + aggregated |= set_._set + else: + aggregated |= set_ + return cls.from_set(aggregated) + + def __or__(self, other): + return type(self).from_set(self._set | other._set) + + def __iter__(self): + for element in self._set: + yield element + + def __bool__(self): + return bool(self._set) + + def __len__(self): + return len(self._set) + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, ', '.join(str(s) for s in self._set)) + + def filter(self, filter_func): + return type(self).from_iterable(filter(filter_func, self._set)) + + def __getattr__(self, name): + def mapper(*args, **kwargs): + return type(self).from_sets( + getattr(context, name)(*args, **kwargs) + for context in self._set + ) + return mapper diff --git a/pythonFiles/release/jedi/debug.py b/pythonFiles/release/jedi/debug.py index 5c59a7b6de04..8caf1accb17f 100755 --- a/pythonFiles/release/jedi/debug.py +++ b/pythonFiles/release/jedi/debug.py @@ -1,25 +1,53 @@ from jedi._compatibility import encoding, is_py3, u -import inspect import os import time +def _lazy_colorama_init(): + """ + Lazily init colorama if necessary, not to screw up stdout is debug not + enabled. + + This version of the function does nothing. + """ + pass + +_inited=False + try: if os.name == 'nt': - # does not work on Windows, as pyreadline and colorama interfere + # Does not work on Windows, as pyreadline and colorama interfere raise ImportError else: # Use colorama for nicer console output. from colorama import Fore, init from colorama import initialise - # pytest resets the stream at the end - causes troubles. Since after - # every output the stream is reset automatically we don't need this. - initialise.atexit_done = True - init() + def _lazy_colorama_init(): + """ + Lazily init colorama if necessary, not to screw up stdout is + debug not enabled. + + This version of the function does init colorama. + """ + global _inited + if not _inited: + # pytest resets the stream at the end - causes troubles. Since + # after every output the stream is reset automatically we don't + # need this. + initialise.atexit_done = True + try: + init() + except Exception: + # Colorama fails with initializing under vim and is buggy in + # version 0.3.6. + pass + _inited = True + except ImportError: class Fore(object): RED = '' GREEN = '' YELLOW = '' + MAGENTA = '' RESET = '' NOTICE = object() @@ -32,15 +60,14 @@ class Fore(object): # callback, interface: level, str debug_function = None -ignored_modules = ['jedi.evaluate.builtin', 'jedi.parser'] -_debug_indent = -1 +_debug_indent = 0 _start_time = time.time() def reset_time(): global _start_time, _debug_indent _start_time = time.time() - _debug_indent = -1 + _debug_indent = 0 def increase_indent(func): @@ -49,44 +76,50 @@ def wrapper(*args, **kwargs): global _debug_indent _debug_indent += 1 try: - result = func(*args, **kwargs) + return func(*args, **kwargs) finally: _debug_indent -= 1 - return result return wrapper -def dbg(message, *args): +def dbg(message, *args, **kwargs): """ Looks at the stack, to see if a debug message should be printed. 
""" + # Python 2 compatibility, because it doesn't understand default args + color = kwargs.pop('color', 'GREEN') + assert color + if debug_function and enable_notice: - frm = inspect.stack()[1] - mod = inspect.getmodule(frm[0]) - if not (mod.__name__ in ignored_modules): - i = ' ' * _debug_indent - debug_function(NOTICE, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args)) + i = ' ' * _debug_indent + _lazy_colorama_init() + debug_function(color, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args)) + +def warning(message, *args, **kwargs): + format = kwargs.pop('format', True) + assert not kwargs -def warning(message, *args): if debug_function and enable_warning: i = ' ' * _debug_indent - debug_function(WARNING, i + 'warning: ' + message % tuple(u(repr(a)) for a in args)) + if format: + message = message % tuple(u(repr(a)) for a in args) + debug_function('RED', i + 'warning: ' + message) def speed(name): if debug_function and enable_speed: now = time.time() i = ' ' * _debug_indent - debug_function(SPEED, i + 'speed: ' + '%s %s' % (name, now - _start_time)) + debug_function('YELLOW', i + 'speed: ' + '%s %s' % (name, now - _start_time)) -def print_to_stdout(level, str_out): - """ The default debug function """ - if level == NOTICE: - col = Fore.GREEN - elif level == WARNING: - col = Fore.RED - else: - col = Fore.YELLOW +def print_to_stdout(color, str_out): + """ + The default debug function that prints to standard out. + + :param str color: A string that is an attribute of ``colorama.Fore``. + """ + col = getattr(Fore, color) + _lazy_colorama_init() if not is_py3: str_out = str_out.encode(encoding, 'replace') print(col + str_out + Fore.RESET) diff --git a/pythonFiles/release/jedi/evaluate/__init__.py b/pythonFiles/release/jedi/evaluate/__init__.py index a959d05f1835..20461071abdb 100755 --- a/pythonFiles/release/jedi/evaluate/__init__.py +++ b/pythonFiles/release/jedi/evaluate/__init__.py @@ -12,29 +12,31 @@ * The programmer is not a total dick, e.g. like `this `_ :-) -The actual algorithm is based on a principle called lazy evaluation. If you -don't know about it, google it. That said, the typical entry point for static -analysis is calling ``eval_statement``. There's separate logic for -autocompletion in the API, the evaluator is all about evaluating an expression. +The actual algorithm is based on a principle called lazy evaluation. That +said, the typical entry point for static analysis is calling +``eval_expr_stmt``. There's separate logic for autocompletion in the API, the +evaluator is all about evaluating an expression. -Now you need to understand what follows after ``eval_statement``. Let's +TODO this paragraph is not what jedi does anymore. + +Now you need to understand what follows after ``eval_expr_stmt``. Let's make an example:: import datetime datetime.date.toda# <-- cursor here First of all, this module doesn't care about completion. It really just cares -about ``datetime.date``. At the end of the procedure ``eval_statement`` will +about ``datetime.date``. At the end of the procedure ``eval_expr_stmt`` will return the ``date`` class. To *visualize* this (simplified): -- ``Evaluator.eval_statement`` doesn't do much, because there's no assignment. -- ``Evaluator.eval_element`` cares for resolving the dotted path +- ``Evaluator.eval_expr_stmt`` doesn't do much, because there's no assignment. 
+- ``Context.eval_node`` cares for resolving the dotted path - ``Evaluator.find_types`` searches for global definitions of datetime, which it finds in the definition of an import, by scanning the syntax tree. - Using the import logic, the datetime module is found. -- Now ``find_types`` is called again by ``eval_element`` to find ``date`` +- Now ``find_types`` is called again by ``eval_node`` to find ``date`` inside the datetime module. Now what would happen if we wanted ``datetime.date.foo.bar``? Two more @@ -46,7 +48,7 @@ from foo import bar Date = bar.baz -Well... You get it. Just another ``eval_statement`` recursion. It's really +Well... You get it. Just another ``eval_expr_stmt`` recursion. It's really easy. Python can obviously get way more complicated then this. To understand tuple assignments, list comprehensions and everything else, a lot more code had to be written. @@ -60,320 +62,298 @@ that are not used are just being ignored. """ -import copy -from itertools import chain +import sys + +from parso.python import tree +import parso -from jedi.parser import tree from jedi import debug -from jedi.evaluate import representation as er +from jedi import parser_utils +from jedi.evaluate.utils import unite from jedi.evaluate import imports from jedi.evaluate import recursion -from jedi.evaluate import iterable -from jedi.evaluate.cache import memoize_default -from jedi.evaluate import stdlib -from jedi.evaluate import finder +from jedi.evaluate.cache import evaluator_function_cache from jedi.evaluate import compiled -from jedi.evaluate import precedence -from jedi.evaluate import param from jedi.evaluate import helpers +from jedi.evaluate.filters import TreeNameDefinition, ParamName +from jedi.evaluate.base_context import ContextualizedName, ContextualizedNode, \ + ContextSet, NO_CONTEXTS, iterate_contexts +from jedi.evaluate.context import ClassContext, FunctionContext, \ + AnonymousInstance, BoundMethod +from jedi.evaluate.context.iterable import CompForContext +from jedi.evaluate.syntax_tree import eval_trailer, eval_expr_stmt, \ + eval_node, check_tuple_assignments class Evaluator(object): - def __init__(self, grammar): + def __init__(self, grammar, project): self.grammar = grammar + self.latest_grammar = parso.load_grammar(version='3.6') self.memoize_cache = {} # for memoize decorators # To memorize modules -> equals `sys.modules`. self.modules = {} # like `sys.modules`. - self.compiled_cache = {} # see `compiled.create()` - self.recursion_detector = recursion.RecursionDetector() - self.execution_recursion_detector = recursion.ExecutionRecursionDetector() + self.compiled_cache = {} # see `evaluate.compiled.create()` + self.inferred_element_counts = {} + self.mixed_cache = {} # see `evaluate.compiled.mixed._create()` self.analysis = [] + self.dynamic_params_depth = 0 + self.is_analysis = False + self.python_version = sys.version_info[:2] + self.project = project + project.add_evaluator(self) - def wrap(self, element): - if isinstance(element, tree.Class): - return er.Class(self, element) - elif isinstance(element, tree.Function): - if isinstance(element, tree.Lambda): - return er.LambdaWrapper(self, element) - else: - return er.Function(self, element) - elif isinstance(element, (tree.Module)) \ - and not isinstance(element, er.ModuleWrapper): - return er.ModuleWrapper(self, element) - else: - return element - - def find_types(self, scope, name_str, position=None, search_global=False, - is_goto=False): - """ - This is the search function. The most important part to debug. 
- `remove_statements` and `filter_statements` really are the core part of - this completion. + self.reset_recursion_limitations() - :param position: Position of the last statement -> tuple of line, column - :return: List of Names. Their parents are the types. - """ - f = finder.NameFinder(self, scope, name_str, position) - scopes = f.scopes(search_global) - if is_goto: - return f.filter_name(scopes) - return f.find(scopes, search_global) - - @memoize_default(default=[], evaluator_is_first_arg=True) - @recursion.recursion_decorator - @debug.increase_indent - def eval_statement(self, stmt, seek_name=None): - """ - The starting point of the completion. A statement always owns a call - list, which are the calls, that a statement does. In case multiple - names are defined in the statement, `seek_name` returns the result for - this name. + # Constants + self.BUILTINS = compiled.get_special_object(self, 'BUILTINS') - :param stmt: A `tree.ExprStmt`. - """ - debug.dbg('eval_statement %s (%s)', stmt, seek_name) - types = self.eval_element(stmt.get_rhs()) - - if seek_name: - types = finder.check_tuple_assignments(types, seek_name) - - first_operation = stmt.first_operation() - if first_operation not in ('=', None) and not isinstance(stmt, er.InstanceElement): # TODO don't check for this. - # `=` is always the last character in aug assignments -> -1 - operator = copy.copy(first_operation) - operator.value = operator.value[:-1] - name = str(stmt.get_defined_names()[0]) - parent = self.wrap(stmt.get_parent_scope()) - left = self.find_types(parent, name, stmt.start_pos, search_global=True) - if isinstance(stmt.get_parent_until(tree.ForStmt), tree.ForStmt): - # Iterate through result and add the values, that's possible - # only in for loops without clutter, because they are - # predictable. - for r in types: - left = precedence.calculate(self, left, operator, [r]) - types = left - else: - types = precedence.calculate(self, left, operator, types) - debug.dbg('eval_statement result %s', types) - return types - - @memoize_default(evaluator_is_first_arg=True) - def eval_element(self, element): - if isinstance(element, iterable.AlreadyEvaluated): - return list(element) - elif isinstance(element, iterable.MergedNodes): - return iterable.unite(self.eval_element(e) for e in element) - - debug.dbg('eval_element %s@%s', element, element.start_pos) - if isinstance(element, (tree.Name, tree.Literal)) or tree.is_node(element, 'atom'): - return self._eval_atom(element) - elif isinstance(element, tree.Keyword): - # For False/True/None - if element.value in ('False', 'True', 'None'): - return [compiled.builtin.get_by_name(element.value)] + def reset_recursion_limitations(self): + self.recursion_detector = recursion.RecursionDetector() + self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self) + + def eval_element(self, context, element): + if isinstance(context, CompForContext): + return eval_node(context, element) + + if_stmt = element + while if_stmt is not None: + if_stmt = if_stmt.parent + if if_stmt.type in ('if_stmt', 'for_stmt'): + break + if parser_utils.is_scope(if_stmt): + if_stmt = None + break + predefined_if_name_dict = context.predefined_names.get(if_stmt) + if predefined_if_name_dict is None and if_stmt and if_stmt.type == 'if_stmt': + if_stmt_test = if_stmt.children[1] + name_dicts = [{}] + # If we already did a check, we don't want to do it again -> If + # context.predefined_names is filled, we stop. + # We don't want to check the if stmt itself, it's just about + # the content. 
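+ # Editor's illustration (assumed example, not from this patch): for
+ # source where `x` may be defined as either `1` or `''`, a test like
+ #
+ # if isinstance(x, int):
+ # x.bit_length()
+ #
+ # shares the name `x` between test and suite, so each possible
+ # definition of `x` is evaluated as its own branch by the code below.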
+ if element.start_pos > if_stmt_test.end_pos: + # Now we need to check if the names in the if_stmt match the + # names in the suite. + if_names = helpers.get_names_of_node(if_stmt_test) + element_names = helpers.get_names_of_node(element) + str_element_names = [e.value for e in element_names] + if any(i.value in str_element_names for i in if_names): + for if_name in if_names: + definitions = self.goto_definitions(context, if_name) + # Every name that has multiple different definitions + # causes the complexity to rise. The complexity should + # never fall below 1. + if len(definitions) > 1: + if len(name_dicts) * len(definitions) > 16: + debug.dbg('Too many options for if branch evaluation %s.', if_stmt) + # There's only a certain amount of branches + # Jedi can evaluate, otherwise it will take to + # long. + name_dicts = [{}] + break + + original_name_dicts = list(name_dicts) + name_dicts = [] + for definition in definitions: + new_name_dicts = list(original_name_dicts) + for i, name_dict in enumerate(new_name_dicts): + new_name_dicts[i] = name_dict.copy() + new_name_dicts[i][if_name.value] = ContextSet(definition) + + name_dicts += new_name_dicts + else: + for name_dict in name_dicts: + name_dict[if_name.value] = definitions + if len(name_dicts) > 1: + result = ContextSet() + for name_dict in name_dicts: + with helpers.predefine_names(context, if_stmt, name_dict): + result |= eval_node(context, element) + return result else: - return [] - elif element.isinstance(tree.Lambda): - return [er.LambdaWrapper(self, element)] - elif element.isinstance(er.LambdaWrapper): - return [element] # TODO this is no real evaluation. - elif element.type == 'expr_stmt': - return self.eval_statement(element) - elif element.type == 'power': - types = self._eval_atom(element.children[0]) - for trailer in element.children[1:]: - if trailer == '**': # has a power operation. - raise NotImplementedError - types = self.eval_trailer(types, trailer) - - return types - elif element.type in ('testlist_star_expr', 'testlist',): - # The implicit tuple in statements. - return [iterable.ImplicitTuple(self, element)] - elif element.type in ('not_test', 'factor'): - types = self.eval_element(element.children[-1]) - for operator in element.children[:-1]: - types = list(precedence.factor_calculate(self, types, operator)) - return types - elif element.type == 'test': - # `x if foo else y` case. - return (self.eval_element(element.children[0]) + - self.eval_element(element.children[-1])) - elif element.type == 'operator': - # Must be an ellipsis, other operators are not evaluated. - return [] # Ignore for now. - elif element.type == 'dotted_name': - types = self._eval_atom(element.children[0]) - for next_name in element.children[2::2]: - types = list(chain.from_iterable(self.find_types(typ, next_name) - for typ in types)) - return types + return self._eval_element_if_evaluated(context, element) else: - return precedence.calculate_children(self, element.children) - - def _eval_atom(self, atom): - """ - Basically to process ``atom`` nodes. The parser sometimes doesn't - generate the node (because it has just one child). In that case an atom - might be a name or a literal as well. - """ - if isinstance(atom, tree.Name): - # This is the first global lookup. 
- stmt = atom.get_definition() - scope = stmt.get_parent_until(tree.IsScope, include_current=True) - if isinstance(stmt, tree.CompFor): - stmt = stmt.get_parent_until((tree.ClassOrFunc, tree.ExprStmt)) - if stmt.type != 'expr_stmt': - # We only need to adjust the start_pos for statements, because - # there the name cannot be used. - stmt = atom - return self.find_types(scope, atom, stmt.start_pos, search_global=True) - elif isinstance(atom, tree.Literal): - return [compiled.create(self, atom.eval())] - else: - c = atom.children - # Parentheses without commas are not tuples. - if c[0] == '(' and not len(c) == 2 \ - and not(tree.is_node(c[1], 'testlist_comp') - and len(c[1].children) > 1): - return self.eval_element(c[1]) - try: - comp_for = c[1].children[1] - except (IndexError, AttributeError): - pass + if predefined_if_name_dict: + return eval_node(context, element) else: - if isinstance(comp_for, tree.CompFor): - return [iterable.Comprehension.from_atom(self, atom)] - return [iterable.Array(self, atom)] - - def eval_trailer(self, types, trailer): - trailer_op, node = trailer.children[:2] - if node == ')': # `arglist` is optional. - node = () - new_types = [] - for typ in types: - debug.dbg('eval_trailer: %s in scope %s', trailer, typ) - if trailer_op == '.': - new_types += self.find_types(typ, node) - elif trailer_op == '(': - new_types += self.execute(typ, node, trailer) - elif trailer_op == '[': - try: - get = typ.get_index_types - except AttributeError: - debug.warning("TypeError: '%s' object is not subscriptable" - % typ) - else: - new_types += get(self, node) - return new_types + return self._eval_element_if_evaluated(context, element) - def execute_evaluated(self, obj, *args): + def _eval_element_if_evaluated(self, context, element): """ - Execute a function with already executed arguments. + TODO This function is temporary: Merge with eval_element. """ - args = [iterable.AlreadyEvaluated([arg]) for arg in args] - return self.execute(obj, args) - - @debug.increase_indent - def execute(self, obj, arguments=(), trailer=None): - if not isinstance(arguments, param.Arguments): - arguments = param.Arguments(self, arguments, trailer) - - if obj.isinstance(er.Function): - obj = obj.get_decorated_func() - - debug.dbg('execute: %s %s', obj, arguments) - try: - # Some stdlib functions like super(), namedtuple(), etc. have been - # hard-coded in Jedi to support them. - return stdlib.execute(self, obj, arguments) - except stdlib.NotInStdLib: - pass - - try: - func = obj.py__call__ - except AttributeError: - debug.warning("no execution possible %s", obj) - return [] - else: - types = func(self, arguments) - debug.dbg('execute result: %s in %s', types, obj) - return types - - def goto_definition(self, name): - def_ = name.get_definition() - if def_.type == 'expr_stmt' and name in def_.get_defined_names(): - return self.eval_statement(def_, name) - call = helpers.call_of_name(name) - return self.eval_element(call) - - def goto(self, name): - def resolve_implicit_imports(names): - for name in names: - if isinstance(name.parent, helpers.FakeImport): - # Those are implicit imports. 
- s = imports.ImportWrapper(self, name) - for n in s.follow(is_goto=True): - yield n - else: - yield name + parent = element + while parent is not None: + parent = parent.parent + predefined_if_name_dict = context.predefined_names.get(parent) + if predefined_if_name_dict is not None: + return eval_node(context, element) + return self._eval_element_cached(context, element) + + @evaluator_function_cache(default=NO_CONTEXTS) + def _eval_element_cached(self, context, element): + return eval_node(context, element) + + def goto_definitions(self, context, name): + def_ = name.get_definition(import_name_always=True) + if def_ is not None: + type_ = def_.type + if type_ == 'classdef': + return [ClassContext(self, context, name.parent)] + elif type_ == 'funcdef': + return [FunctionContext(self, context, name.parent)] + + if type_ == 'expr_stmt': + is_simple_name = name.parent.type not in ('power', 'trailer') + if is_simple_name: + return eval_expr_stmt(context, def_, name) + if type_ == 'for_stmt': + container_types = context.eval_node(def_.children[3]) + cn = ContextualizedNode(context, def_.children[3]) + for_types = iterate_contexts(container_types, cn) + c_node = ContextualizedName(context, name) + return check_tuple_assignments(self, c_node, for_types) + if type_ in ('import_from', 'import_name'): + return imports.infer_import(context, name) + + return helpers.evaluate_call_of_leaf(context, name) + + def goto(self, context, name): + definition = name.get_definition(import_name_always=True) + if definition is not None: + type_ = definition.type + if type_ == 'expr_stmt': + # Only take the parent, because if it's more complicated than just + # a name it's something you can "goto" again. + is_simple_name = name.parent.type not in ('power', 'trailer') + if is_simple_name: + return [TreeNameDefinition(context, name)] + elif type_ == 'param': + return [ParamName(context, name)] + elif type_ in ('funcdef', 'classdef'): + return [TreeNameDefinition(context, name)] + elif type_ in ('import_from', 'import_name'): + module_names = imports.infer_import(context, name, is_goto=True) + return module_names - stmt = name.get_definition() par = name.parent - if par.type == 'argument' and par.children[1] == '=' and par.children[0] == name: + node_type = par.type + if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name: # Named param goto. 
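# Editor's illustration (assumed example, not from this patch): given
# `def foo(bar): ...`, goto on `bar` in the call `foo(bar=3)` should
# land on the `bar` parameter of `foo`; the code below evaluates the
# called object and collects the parameter names that match.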
trailer = par.parent if trailer.type == 'arglist': trailer = trailer.parent if trailer.type != 'classdef': if trailer.type == 'decorator': - types = self.eval_element(trailer.children[1]) + context_set = context.eval_node(trailer.children[1]) else: i = trailer.parent.children.index(trailer) to_evaluate = trailer.parent.children[:i] - types = self.eval_element(to_evaluate[0]) + if to_evaluate[0] == 'await': + to_evaluate.pop(0) + context_set = context.eval_node(to_evaluate[0]) for trailer in to_evaluate[1:]: - types = self.eval_trailer(types, trailer) + context_set = eval_trailer(context, context_set, trailer) param_names = [] - for typ in types: + for context in context_set: try: - params = typ.params + get_param_names = context.get_param_names except AttributeError: pass else: - param_names += [param.name for param in params - if param.name.value == name.value] + for param_name in get_param_names(): + if param_name.string_name == name.value: + param_names.append(param_name) return param_names - elif isinstance(par, tree.ExprStmt) and name in par.get_defined_names(): - # Only take the parent, because if it's more complicated than just - # a name it's something you can "goto" again. - return [name] - elif isinstance(par, (tree.Param, tree.Function, tree.Class)) and par.name is name: - return [name] - elif isinstance(stmt, tree.Import): - modules = imports.ImportWrapper(self, name).follow(is_goto=True) - return list(resolve_implicit_imports(modules)) - elif par.type == 'dotted_name': # Is a decorator. + elif node_type == 'dotted_name': # Is a decorator. index = par.children.index(name) if index > 0: new_dotted = helpers.deep_ast_copy(par) new_dotted.children[index - 1:] = [] - types = self.eval_element(new_dotted) - return resolve_implicit_imports(iterable.unite( - self.find_types(typ, name, is_goto=True) for typ in types - )) - - scope = name.get_parent_scope() - if tree.is_node(name.parent, 'trailer'): - call = helpers.call_of_name(name, cut_own_trailer=True) - types = self.eval_element(call) - return resolve_implicit_imports(iterable.unite( - self.find_types(typ, name, is_goto=True) for typ in types - )) + values = context.eval_node(new_dotted) + return unite( + value.py__getattribute__(name, name_context=context, is_goto=True) + for value in values + ) + + if node_type == 'trailer' and par.children[0] == '.': + values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True) + return unite( + value.py__getattribute__(name, name_context=context, is_goto=True) + for value in values + ) else: - if stmt.type != 'expr_stmt': - # We only need to adjust the start_pos for statements, because - # there the name cannot be used. + stmt = tree.search_ancestor( + name, 'expr_stmt', 'lambdef' + ) or name + if stmt.type == 'lambdef': stmt = name - return self.find_types(scope, name, stmt.start_pos, - search_global=True, is_goto=True) + return context.py__getattribute__( + name, + position=stmt.start_pos, + search_global=True, is_goto=True + ) + + def create_context(self, base_context, node, node_is_context=False, node_is_object=False): + def parent_scope(node): + while True: + node = node.parent + + if parser_utils.is_scope(node): + return node + elif node.type in ('argument', 'testlist_comp'): + if node.children[1].type == 'comp_for': + return node.children[1] + elif node.type == 'dictorsetmaker': + for n in node.children[1:4]: + # In dictionaries it can be pretty much anything. 
+ if n.type == 'comp_for': + return n + + def from_scope_node(scope_node, child_is_funcdef=None, is_nested=True, node_is_object=False): + if scope_node == base_node: + return base_context + + is_funcdef = scope_node.type in ('funcdef', 'lambdef') + parent_scope = parser_utils.get_parent_scope(scope_node) + parent_context = from_scope_node(parent_scope, child_is_funcdef=is_funcdef) + + if is_funcdef: + if isinstance(parent_context, AnonymousInstance): + func = BoundMethod( + self, parent_context, parent_context.class_context, + parent_context.parent_context, scope_node + ) + else: + func = FunctionContext( + self, + parent_context, + scope_node + ) + if is_nested and not node_is_object: + return func.get_function_execution() + return func + elif scope_node.type == 'classdef': + class_context = ClassContext(self, parent_context, scope_node) + if child_is_funcdef: + # anonymous instance + return AnonymousInstance(self, parent_context, class_context) + else: + return class_context + elif scope_node.type == 'comp_for': + if node.start_pos >= scope_node.children[-1].start_pos: + return parent_context + return CompForContext.from_comp_for(parent_context, scope_node) + raise Exception("There's a scope that was not managed.") + + base_node = base_context.tree_node + + if node_is_context and parser_utils.is_scope(node): + scope_node = node + else: + if node.parent.type in ('funcdef', 'classdef') and node.parent.name == node: + # When we're on class/function names/leafs that define the + # object itself and not its contents. + node = node.parent + scope_node = parent_scope(node) + return from_scope_node(scope_node, is_nested=True, node_is_object=node_is_object) diff --git a/pythonFiles/release/jedi/evaluate/analysis.py b/pythonFiles/release/jedi/evaluate/analysis.py index d4a411f42b68..c825e5fef9e9 100755 --- a/pythonFiles/release/jedi/evaluate/analysis.py +++ b/pythonFiles/release/jedi/evaluate/analysis.py @@ -2,7 +2,7 @@ Module for statical analysis. 
""" from jedi import debug -from jedi.parser import tree +from parso.python import tree from jedi.evaluate.compiled import CompiledObject @@ -10,14 +10,18 @@ 'attribute-error': (1, AttributeError, 'Potential AttributeError.'), 'name-error': (2, NameError, 'Potential NameError.'), 'import-error': (3, ImportError, 'Potential ImportError.'), - 'type-error-generator': (4, TypeError, "TypeError: 'generator' object is not subscriptable."), - 'type-error-too-many-arguments': (5, TypeError, None), - 'type-error-too-few-arguments': (6, TypeError, None), - 'type-error-keyword-argument': (7, TypeError, None), - 'type-error-multiple-values': (8, TypeError, None), - 'type-error-star-star': (9, TypeError, None), - 'type-error-star': (10, TypeError, None), - 'type-error-operation': (11, TypeError, None), + 'type-error-too-many-arguments': (4, TypeError, None), + 'type-error-too-few-arguments': (5, TypeError, None), + 'type-error-keyword-argument': (6, TypeError, None), + 'type-error-multiple-values': (7, TypeError, None), + 'type-error-star-star': (8, TypeError, None), + 'type-error-star': (9, TypeError, None), + 'type-error-operation': (10, TypeError, None), + 'type-error-not-iterable': (11, TypeError, None), + 'type-error-isinstance': (12, TypeError, None), + 'type-error-not-subscriptable': (13, TypeError, None), + 'value-error-too-many-values': (14, ValueError, None), + 'value-error-too-few-values': (15, ValueError, None), } @@ -52,8 +56,8 @@ def __str__(self): return self.__unicode__() def __eq__(self, other): - return (self.path == other.path and self.name == other.name - and self._start_pos == other._start_pos) + return (self.path == other.path and self.name == other.name and + self._start_pos == other._start_pos) def __ne__(self, other): return not self.__eq__(other) @@ -71,61 +75,61 @@ class Warning(Error): pass -def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None): - from jedi.evaluate.iterable import MergedNodes - while isinstance(jedi_obj, MergedNodes): - if len(jedi_obj) != 1: - # TODO is this kosher? - return - jedi_obj = list(jedi_obj)[0] - - exception = CODES[name][1] - if _check_for_exception_catch(evaluator, jedi_obj, exception, payload): +def add(node_context, error_name, node, message=None, typ=Error, payload=None): + exception = CODES[error_name][1] + if _check_for_exception_catch(node_context, node, exception, payload): return - module_path = jedi_obj.get_parent_until().path - instance = typ(name, module_path, jedi_obj.start_pos, message) - debug.warning(str(instance)) - evaluator.analysis.append(instance) + # TODO this path is probably not right + module_context = node_context.get_root_context() + module_path = module_context.py__file__() + instance = typ(error_name, module_path, node.start_pos, message) + debug.warning(str(instance), format=False) + node_context.evaluator.analysis.append(instance) def _check_for_setattr(instance): """ Check if there's any setattr method inside an instance. If so, return True. 
""" - module = instance.get_parent_until() + from jedi.evaluate.context import ModuleContext + module = instance.get_root_context() + if not isinstance(module, ModuleContext): + return False + + node = module.tree_node try: - stmts = module.used_names['setattr'] + stmts = node.get_used_names()['setattr'] except KeyError: return False - return any(instance.start_pos < stmt.start_pos < instance.end_pos + return any(node.start_pos < stmt.start_pos < node.end_pos for stmt in stmts) -def add_attribute_error(evaluator, scope, name): - message = ('AttributeError: %s has no attribute %s.' % (scope, name)) - from jedi.evaluate.representation import Instance +def add_attribute_error(name_context, lookup_context, name): + message = ('AttributeError: %s has no attribute %s.' % (lookup_context, name)) + from jedi.evaluate.context.instance import AbstractInstanceContext, CompiledInstanceName # Check for __getattr__/__getattribute__ existance and issue a warning # instead of an error, if that happens. - if isinstance(scope, Instance): - typ = Warning - try: - scope.get_subscope_by_name('__getattr__') - except KeyError: - try: - scope.get_subscope_by_name('__getattribute__') - except KeyError: - if not _check_for_setattr(scope): - typ = Error - else: - typ = Error + typ = Error + if isinstance(lookup_context, AbstractInstanceContext): + slot_names = lookup_context.get_function_slot_names('__getattr__') + \ + lookup_context.get_function_slot_names('__getattribute__') + for n in slot_names: + if isinstance(name, CompiledInstanceName) and \ + n.parent_context.obj == object: + typ = Warning + break + + if _check_for_setattr(lookup_context): + typ = Warning - payload = scope, name - add(evaluator, 'attribute-error', name, message, typ, payload) + payload = lookup_context, name + add(name_context, 'attribute-error', name, message, typ, payload) -def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None): +def _check_for_exception_catch(node_context, jedi_name, exception, payload=None): """ Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and doesn't count as an error (if equal to `exception`). @@ -146,157 +150,65 @@ def check_try_for_except(obj, exception): colon = next(iterator) suite = next(iterator) if branch_type == 'try' \ - and not (branch_type.start_pos < jedi_obj.start_pos <= suite.end_pos): + and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos): return False - for node in obj.except_clauses(): + for node in obj.get_except_clause_tests(): if node is None: return True # An exception block that catches everything. 
else: - except_classes = evaluator.eval_element(node) + except_classes = node_context.eval_node(node) for cls in except_classes: - from jedi.evaluate import iterable - if isinstance(cls, iterable.Array) and cls.type == 'tuple': + from jedi.evaluate.context import iterable + if isinstance(cls, iterable.AbstractIterable) and \ + cls.array_type == 'tuple': # multiple exceptions - for c in cls.values(): - if check_match(c, exception): - return True + for lazy_context in cls.py__iter__(): + for typ in lazy_context.infer(): + if check_match(typ, exception): + return True else: if check_match(cls, exception): return True def check_hasattr(node, suite): try: - assert suite.start_pos <= jedi_obj.start_pos < suite.end_pos - assert node.type == 'power' + assert suite.start_pos <= jedi_name.start_pos < suite.end_pos + assert node.type in ('power', 'atom_expr') base = node.children[0] assert base.type == 'name' and base.value == 'hasattr' trailer = node.children[1] assert trailer.type == 'trailer' arglist = trailer.children[1] assert arglist.type == 'arglist' - from jedi.evaluate.param import Arguments - args = list(Arguments(evaluator, arglist).unpack()) + from jedi.evaluate.arguments import TreeArguments + args = list(TreeArguments(node_context.evaluator, node_context, arglist).unpack()) # Arguments should be very simple assert len(args) == 2 # Check name - key, values = args[1] - assert len(values) == 1 - names = evaluator.eval_element(values[0]) + key, lazy_context = args[1] + names = list(lazy_context.infer()) assert len(names) == 1 and isinstance(names[0], CompiledObject) - assert names[0].obj == str(payload[1]) + assert names[0].obj == payload[1].value # Check objects - key, values = args[0] - assert len(values) == 1 - objects = evaluator.eval_element(values[0]) + key, lazy_context = args[0] + objects = lazy_context.infer() return payload[0] in objects except AssertionError: return False - obj = jedi_obj - while obj is not None and not obj.isinstance(tree.Function, tree.Class): - if obj.isinstance(tree.Flow): + obj = jedi_name + while obj is not None and not isinstance(obj, (tree.Function, tree.Class)): + if isinstance(obj, tree.Flow): # try/except catch check - if obj.isinstance(tree.TryStmt) and check_try_for_except(obj, exception): + if obj.type == 'try_stmt' and check_try_for_except(obj, exception): return True # hasattr check - if exception == AttributeError and obj.isinstance(tree.IfStmt, tree.WhileStmt): + if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'): if check_hasattr(obj.children[1], obj.children[3]): return True obj = obj.parent return False - - -def get_module_statements(module): - """ - Returns the statements used in a module. All these statements should be - evaluated to check for potential exceptions. 
- """ - def check_children(node): - try: - children = node.children - except AttributeError: - return [] - else: - nodes = [] - for child in children: - nodes += check_children(child) - if child.type == 'trailer': - c = child.children - if c[0] == '(' and c[1] != ')': - if c[1].type != 'arglist': - if c[1].type == 'argument': - nodes.append(c[1].children[-1]) - else: - nodes.append(c[1]) - else: - for argument in c[1].children: - if argument.type == 'argument': - nodes.append(argument.children[-1]) - elif argument.type != 'operator': - nodes.append(argument) - return nodes - - def add_nodes(nodes): - new = set() - for node in nodes: - if isinstance(node, tree.Flow): - children = node.children - if node.type == 'for_stmt': - children = children[2:] # Don't want to include the names. - # Pick the suite/simple_stmt. - new |= add_nodes(children) - elif node.type in ('simple_stmt', 'suite'): - new |= add_nodes(node.children) - elif node.type in ('return_stmt', 'yield_expr'): - try: - new.add(node.children[1]) - except IndexError: - pass - elif node.type not in ('whitespace', 'operator', 'keyword', - 'parameters', 'decorated', 'except_clause') \ - and not isinstance(node, (tree.ClassOrFunc, tree.Import)): - new.add(node) - - try: - children = node.children - except AttributeError: - pass - else: - for next_node in children: - new.update(check_children(node)) - if next_node.type != 'keyword' and node.type != 'expr_stmt': - new.add(node) - return new - - nodes = set() - import_names = set() - decorated_funcs = [] - for scope in module.walk(): - for imp in set(scope.imports): - import_names |= set(imp.get_defined_names()) - if imp.is_nested(): - import_names |= set(path[-1] for path in imp.paths()) - - children = scope.children - if isinstance(scope, tree.ClassOrFunc): - children = children[2:] # We don't want to include the class name. - nodes |= add_nodes(children) - - for flow in scope.flows: - if flow.type == 'for_stmt': - nodes.add(flow.children[3]) - elif flow.type == 'try_stmt': - nodes.update(e for e in flow.except_clauses() if e is not None) - - try: - decorators = scope.get_decorators() - except AttributeError: - pass - else: - if decorators: - decorated_funcs.append(scope) - return nodes, import_names, decorated_funcs diff --git a/pythonFiles/release/jedi/evaluate/arguments.py b/pythonFiles/release/jedi/evaluate/arguments.py new file mode 100644 index 000000000000..32b9238c6f4d --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/arguments.py @@ -0,0 +1,245 @@ +from parso.python import tree + +from jedi._compatibility import zip_longest +from jedi import debug +from jedi.evaluate import analysis +from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \ + LazyTreeContext, get_merged_lazy_context +from jedi.evaluate.filters import ParamName +from jedi.evaluate.base_context import NO_CONTEXTS +from jedi.evaluate.context import iterable +from jedi.evaluate.param import get_params, ExecutedParam + +def try_iter_content(types, depth=0): + """Helper method for static analysis.""" + if depth > 10: + # It's possible that a loop has references on itself (especially with + # CompiledObject). Therefore don't loop infinitely. 
+ return + + for typ in types: + try: + f = typ.py__iter__ + except AttributeError: + pass + else: + for lazy_context in f(): + try_iter_content(lazy_context.infer(), depth + 1) + + +class AbstractArguments(object): + context = None + + def eval_argument_clinic(self, parameters): + """Uses a list with argument clinic information (see PEP 436).""" + iterator = self.unpack() + for i, (name, optional, allow_kwargs) in enumerate(parameters): + key, argument = next(iterator, (None, None)) + if key is not None: + raise NotImplementedError + if argument is None and not optional: + debug.warning('TypeError: %s expected at least %s arguments, got %s', + name, len(parameters), i) + raise ValueError + values = NO_CONTEXTS if argument is None else argument.infer() + + if not values and not optional: + # For the stdlib we always want values. If we don't get them, + # that's ok, maybe something is too hard to resolve, however, + # we will not proceed with the evaluation of that function. + debug.warning('argument_clinic "%s" not resolvable.', name) + raise ValueError + yield values + + def eval_all(self, funcdef=None): + """ + Evaluates all arguments as a support for static analysis + (normally Jedi). + """ + for key, lazy_context in self.unpack(): + types = lazy_context.infer() + try_iter_content(types) + + def get_calling_nodes(self): + raise NotImplementedError + + def unpack(self, funcdef=None): + raise NotImplementedError + + def get_params(self, execution_context): + return get_params(execution_context, self) + + +class AnonymousArguments(AbstractArguments): + def get_params(self, execution_context): + from jedi.evaluate.dynamic import search_params + return search_params( + execution_context.evaluator, + execution_context, + execution_context.tree_node + ) + + +class TreeArguments(AbstractArguments): + def __init__(self, evaluator, context, argument_node, trailer=None): + """ + The argument_node is either a parser node or a list of evaluated + objects. Those evaluated objects may be lists of evaluated objects + themselves (one list for the first argument, one for the second, etc). + + :param argument_node: May be an argument_node or a list of nodes. + """ + self.argument_node = argument_node + self.context = context + self._evaluator = evaluator + self.trailer = trailer # Can be None, e.g. in a class definition. + + def _split(self): + if isinstance(self.argument_node, (tuple, list)): + for el in self.argument_node: + yield 0, el + else: + if not (self.argument_node.type == 'arglist' or ( + # in python 3.5 **arg is an argument, not arglist + (self.argument_node.type == 'argument') and + self.argument_node.children[0] in ('*', '**'))): + yield 0, self.argument_node + return + + iterator = iter(self.argument_node.children) + for child in iterator: + if child == ',': + continue + elif child in ('*', '**'): + yield len(child.value), next(iterator) + elif child.type == 'argument' and \ + child.children[0] in ('*', '**'): + assert len(child.children) == 2 + yield len(child.children[0].value), child.children[1] + else: + yield 0, child + + def unpack(self, funcdef=None): + named_args = [] + for star_count, el in self._split(): + if star_count == 1: + arrays = self.context.eval_node(el) + iterators = [_iterate_star_args(self.context, a, el, funcdef) + for a in arrays] + iterators = list(iterators) + for values in list(zip_longest(*iterators)): + # TODO zip_longest yields None, that means this would raise + # an exception? 
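+ # Editor's illustration (assumed example, not from this patch): for
+ # `f(*args)` where `args` may be `(1, 2)` or `(3,)`, the zip below
+ # merges position 0 into {1, 3} and position 1 into {2}.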
+ yield None, get_merged_lazy_context( + [v for v in values if v is not None] + ) + elif star_count == 2: + arrays = self._evaluator.eval_element(self.context, el) + for dct in arrays: + for key, values in _star_star_dict(self.context, dct, el, funcdef): + yield key, values + else: + if el.type == 'argument': + c = el.children + if len(c) == 3: # Keyword argument. + named_args.append((c[0].value, LazyTreeContext(self.context, c[2]),)) + else: # Generator comprehension. + # Include the brackets with the parent. + comp = iterable.GeneratorComprehension( + self._evaluator, self.context, self.argument_node.parent) + yield None, LazyKnownContext(comp) + else: + yield None, LazyTreeContext(self.context, el) + + # Reordering var_args is necessary, because star args sometimes appear + # after named argument, but in the actual order it's prepended. + for named_arg in named_args: + yield named_arg + + def as_tree_tuple_objects(self): + for star_count, argument in self._split(): + if argument.type == 'argument': + argument, default = argument.children[::2] + else: + default = None + yield argument, default, star_count + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.argument_node) + + def get_calling_nodes(self): + from jedi.evaluate.dynamic import MergedExecutedParams + old_arguments_list = [] + arguments = self + + while arguments not in old_arguments_list: + if not isinstance(arguments, TreeArguments): + break + + old_arguments_list.append(arguments) + for name, default, star_count in reversed(list(arguments.as_tree_tuple_objects())): + if not star_count or not isinstance(name, tree.Name): + continue + + names = self._evaluator.goto(arguments.context, name) + if len(names) != 1: + break + if not isinstance(names[0], ParamName): + break + param = names[0].get_param() + if isinstance(param, MergedExecutedParams): + # For dynamic searches we don't even want to see errors. + return [] + if not isinstance(param, ExecutedParam): + break + if param.var_args is None: + break + arguments = param.var_args + break + + return [arguments.argument_node or arguments.trailer] + + +class ValuesArguments(AbstractArguments): + def __init__(self, values_list): + self._values_list = values_list + + def unpack(self, funcdef=None): + for values in self._values_list: + yield None, LazyKnownContexts(values) + + def get_calling_nodes(self): + return [] + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._values_list) + + +def _iterate_star_args(context, array, input_node, funcdef=None): + try: + iter_ = array.py__iter__ + except AttributeError: + if funcdef is not None: + # TODO this funcdef should not be needed. + m = "TypeError: %s() argument after * must be a sequence, not %s" \ + % (funcdef.name.value, array) + analysis.add(context, 'type-error-star', input_node, message=m) + else: + for lazy_context in iter_(): + yield lazy_context + + +def _star_star_dict(context, array, input_node, funcdef): + from jedi.evaluate.context.instance import CompiledInstance + if isinstance(array, CompiledInstance) and array.name.string_name == 'dict': + # For now ignore this case. In the future add proper iterators and just + # make one call without crazy isinstance checks. 
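+ # Editor's illustration (assumed example, not from this patch): for a
+ # literal call `f(**{'a': 1})` the dict is a tree-based iterable, so
+ # the AbstractIterable branch below yields its ('a', value) pairs via
+ # exact_key_items(); only compiled dict instances are skipped here.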
+ return {}
+ elif isinstance(array, iterable.AbstractIterable) and array.array_type == 'dict':
+ return array.exact_key_items()
+ else:
+ if funcdef is not None:
+ m = "TypeError: %s argument after ** must be a mapping, not %s" \
+ % (funcdef.name.value, array)
+ analysis.add(context, 'type-error-star-star', input_node, message=m)
+ return {}
diff --git a/pythonFiles/release/jedi/evaluate/base_context.py b/pythonFiles/release/jedi/evaluate/base_context.py
new file mode 100644
index 000000000000..693a99aae7aa
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/base_context.py
@@ -0,0 +1,260 @@
+from parso.python.tree import ExprStmt, CompFor
+
+from jedi import debug
+from jedi._compatibility import Python3Method, zip_longest, unicode
+from jedi.parser_utils import clean_scope_docstring, get_doc_with_call_signature
+from jedi.common import BaseContextSet, BaseContext
+
+
+class Context(BaseContext):
+ """
+ Should be defined, otherwise the API returns empty types.
+ """
+
+ predefined_names = {}
+ tree_node = None
+ """
+ To be defined by subclasses.
+ """
+
+ @property
+ def api_type(self):
+ # By default just lower name of the class. Can and should be
+ # overwritten.
+ return self.__class__.__name__.lower()
+
+ @debug.increase_indent
+ def execute(self, arguments):
+ """
+ In contrast to py__call__ this function is always available.
+
+ `hasattr(x, py__call__)` can also be checked to see if a context is
+ executable.
+ """
+ if self.evaluator.is_analysis:
+ arguments.eval_all()
+
+ debug.dbg('execute: %s %s', self, arguments)
+ from jedi.evaluate import stdlib
+ try:
+ # Some stdlib functions like super(), namedtuple(), etc. have been
+ # hard-coded in Jedi to support them.
+ return stdlib.execute(self.evaluator, self, arguments)
+ except stdlib.NotInStdLib:
+ pass
+
+ try:
+ func = self.py__call__
+ except AttributeError:
+ debug.warning("no execution possible %s", self)
+ return NO_CONTEXTS
+ else:
+ context_set = func(arguments)
+ debug.dbg('execute result: %s in %s', context_set, self)
+ return context_set
+
+ return self.evaluator.execute(self, arguments)
+
+ def execute_evaluated(self, *value_list):
+ """
+ Execute a function with already executed arguments.
+ """
+ from jedi.evaluate.arguments import ValuesArguments
+ arguments = ValuesArguments([ContextSet(value) for value in value_list])
+ return self.execute(arguments)
+
+ def iterate(self, contextualized_node=None):
+ debug.dbg('iterate')
+ try:
+ iter_method = self.py__iter__
+ except AttributeError:
+ if contextualized_node is not None:
+ from jedi.evaluate import analysis
+ analysis.add(
+ contextualized_node.context,
+ 'type-error-not-iterable',
+ contextualized_node.node,
+ message="TypeError: '%s' object is not iterable" % self)
+ return iter([])
+ else:
+ return iter_method()
+
+ def get_item(self, index_contexts, contextualized_node):
+ from jedi.evaluate.compiled import CompiledObject
+ from jedi.evaluate.context.iterable import Slice, AbstractIterable
+ result = ContextSet()
+
+ for index in index_contexts:
+ if isinstance(index, (CompiledObject, Slice)):
+ index = index.obj
+
+ if type(index) not in (float, int, str, unicode, slice, type(Ellipsis)):
+ # If the index is not clearly defined, we have to get all the
+ # possibilities.
+ if isinstance(self, AbstractIterable) and self.array_type == 'dict':
+ result |= self.dict_values()
+ else:
+ result |= iterate_contexts(ContextSet(self))
+ continue
+
+ # The actual getitem call.
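+ # Editor's illustration (assumed example, not from this patch): for
+ # `x[0]` the literal index reaches py__getitem__ below, while `x[i]`
+ # with an unresolved `i` was already widened above to all values.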
+ try: + getitem = self.py__getitem__ + except AttributeError: + from jedi.evaluate import analysis + # TODO this context is probably not right. + analysis.add( + contextualized_node.context, + 'type-error-not-subscriptable', + contextualized_node.node, + message="TypeError: '%s' object is not subscriptable" % self + ) + else: + try: + result |= getitem(index) + except IndexError: + result |= iterate_contexts(ContextSet(self)) + except KeyError: + # Must be a dict. Lists don't raise KeyErrors. + result |= self.dict_values() + return result + + def eval_node(self, node): + return self.evaluator.eval_element(self, node) + + @Python3Method + def py__getattribute__(self, name_or_str, name_context=None, position=None, + search_global=False, is_goto=False, + analysis_errors=True): + """ + :param position: Position of the last statement -> tuple of line, column + """ + if name_context is None: + name_context = self + from jedi.evaluate import finder + f = finder.NameFinder(self.evaluator, self, name_context, name_or_str, + position, analysis_errors=analysis_errors) + filters = f.get_filters(search_global) + if is_goto: + return f.filter_name(filters) + return f.find(filters, attribute_lookup=not search_global) + + return self.evaluator.find_types( + self, name_or_str, name_context, position, search_global, is_goto, + analysis_errors) + + def create_context(self, node, node_is_context=False, node_is_object=False): + return self.evaluator.create_context(self, node, node_is_context, node_is_object) + + def is_class(self): + return False + + def py__bool__(self): + """ + Since Wrapper is a super class for classes, functions and modules, + the return value will always be true. + """ + return True + + def py__doc__(self, include_call_signature=False): + try: + self.tree_node.get_doc_node + except AttributeError: + return '' + else: + if include_call_signature: + return get_doc_with_call_signature(self.tree_node) + else: + return clean_scope_docstring(self.tree_node) + return None + + +def iterate_contexts(contexts, contextualized_node=None): + """ + Calls `iterate`, on all contexts but ignores the ordering and just returns + all contexts that the iterate functions yield. + """ + return ContextSet.from_sets( + lazy_context.infer() + for lazy_context in contexts.iterate(contextualized_node) + ) + + +class TreeContext(Context): + def __init__(self, evaluator, parent_context=None): + super(TreeContext, self).__init__(evaluator, parent_context) + self.predefined_names = {} + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.tree_node) + + +class ContextualizedNode(object): + def __init__(self, context, node): + self.context = context + self.node = node + + def get_root_context(self): + return self.context.get_root_context() + + def infer(self): + return self.context.eval_node(self.node) + + +class ContextualizedName(ContextualizedNode): + # TODO merge with TreeNameDefinition?! + @property + def name(self): + return self.node + + def assignment_indexes(self): + """ + Returns an array of tuple(int, node) of the indexes that are used in + tuple assignments. + + For example if the name is ``y`` in the following code:: + + x, (y, z) = 2, '' + + would result in ``[(1, xyz_node), (0, yz_node)]``. 
+ """ + indexes = [] + node = self.node.parent + compare = self.node + while node is not None: + if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'): + for i, child in enumerate(node.children): + if child == compare: + indexes.insert(0, (int(i / 2), node)) + break + else: + raise LookupError("Couldn't find the assignment.") + elif isinstance(node, (ExprStmt, CompFor)): + break + + compare = node + node = node.parent + return indexes + + +class ContextSet(BaseContextSet): + def py__class__(self): + return ContextSet.from_iterable(c.py__class__() for c in self._set) + + def iterate(self, contextualized_node=None): + from jedi.evaluate.lazy_context import get_merged_lazy_context + type_iters = [c.iterate(contextualized_node) for c in self._set] + for lazy_contexts in zip_longest(*type_iters): + yield get_merged_lazy_context( + [l for l in lazy_contexts if l is not None] + ) + + +NO_CONTEXTS = ContextSet() + + +def iterator_to_context_set(func): + def wrapper(*args, **kwargs): + return ContextSet.from_iterable(func(*args, **kwargs)) + + return wrapper diff --git a/pythonFiles/release/jedi/evaluate/cache.py b/pythonFiles/release/jedi/evaluate/cache.py index b44f812accd2..b7c7cd7e979f 100755 --- a/pythonFiles/release/jedi/evaluate/cache.py +++ b/pythonFiles/release/jedi/evaluate/cache.py @@ -1,15 +1,13 @@ """ -- the popular ``memoize_default`` works like a typical memoize and returns the +- the popular ``_memoize_default`` works like a typical memoize and returns the default otherwise. -- ``CachedMetaClass`` uses ``memoize_default`` to do the same with classes. +- ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes. """ -import inspect +_NO_DEFAULT = object() -NO_DEFAULT = object() - -def memoize_default(default=NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False): +def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False): """ This is a typical memoization decorator, BUT there is one difference: To prevent recursion it sets defaults. @@ -19,12 +17,13 @@ def memoize_default(default=NO_DEFAULT, evaluator_is_first_arg=False, second_arg """ def func(function): def wrapper(obj, *args, **kwargs): + # TODO These checks are kind of ugly and slow. if evaluator_is_first_arg: cache = obj.memoize_cache - elif second_arg_is_evaluator: # needed for meta classes - cache = args[0].memoize_cache + elif second_arg_is_evaluator: + cache = args[0].memoize_cache # needed for meta classes else: - cache = obj._evaluator.memoize_cache + cache = obj.evaluator.memoize_cache try: memo = cache[function] @@ -36,23 +35,43 @@ def wrapper(obj, *args, **kwargs): if key in memo: return memo[key] else: - if default is not NO_DEFAULT: + if default is not _NO_DEFAULT: memo[key] = default rv = function(obj, *args, **kwargs) - if inspect.isgenerator(rv): - rv = list(rv) memo[key] = rv return rv return wrapper + return func +def evaluator_function_cache(default=_NO_DEFAULT): + def decorator(func): + return _memoize_default(default=default, evaluator_is_first_arg=True)(func) + + return decorator + + +def evaluator_method_cache(default=_NO_DEFAULT): + def decorator(func): + return _memoize_default(default=default)(func) + + return decorator + + +def _memoize_meta_class(): + def decorator(call): + return _memoize_default(second_arg_is_evaluator=True)(call) + + return decorator + + class CachedMetaClass(type): """ This is basically almost the same than the decorator above, it just caches class initializations. 
Either you do it this way or with decorators, but with decorators you lose
class access (isinstance, etc).
"""
- @memoize_default(None, second_arg_is_evaluator=True)
+ @_memoize_meta_class()
def __call__(self, *args, **kwargs):
return super(CachedMetaClass, self).__call__(*args, **kwargs)
diff --git a/pythonFiles/release/jedi/evaluate/compiled/__init__.py b/pythonFiles/release/jedi/evaluate/compiled/__init__.py
index 67d3f37119d3..f9f2e0781e13 100755
--- a/pythonFiles/release/jedi/evaluate/compiled/__init__.py
+++ b/pythonFiles/release/jedi/evaluate/compiled/__init__.py
@@ -5,14 +5,17 @@
import re
import sys
import os
+import types
from functools import partial
-from jedi._compatibility import builtins as _builtins, unicode
+from jedi._compatibility import builtins as _builtins, unicode, py_version
from jedi import debug
from jedi.cache import underscore_memoization, memoize_method
-from jedi.evaluate.sys_path import get_sys_path
-from jedi.parser.tree import Param, Base, Operator, zero_position_modifier
-from jedi.evaluate.helpers import FakeName
+from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \
+ ContextNameMixin
+from jedi.evaluate.base_context import Context, ContextSet
+from jedi.evaluate.lazy_context import LazyKnownContext
+from jedi.evaluate.compiled.getattr_static import getattr_static
from . import fake
@@ -22,6 +25,23 @@
_path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
+# Those types don't exist in typing.
+MethodDescriptorType = type(str.replace)
+WrapperDescriptorType = type(set.__iter__)
+# `object.__subclasshook__` is an already executed descriptor.
+object_class_dict = type.__dict__["__dict__"].__get__(object)
+ClassMethodDescriptorType = type(object_class_dict['__subclasshook__'])
+
+ALLOWED_DESCRIPTOR_ACCESS = (
+ types.FunctionType,
+ types.GetSetDescriptorType,
+ types.MemberDescriptorType,
+ MethodDescriptorType,
+ WrapperDescriptorType,
+ ClassMethodDescriptorType,
+ staticmethod,
+ classmethod,
+)
class CheckAttribute(object):
"""Raises an AttributeError if the attribute X isn't available."""
def __init__(self, func):
@@ -32,212 +52,225 @@ def __init__(self, func):
def __get__(self, instance, owner):
# This might raise an AttributeError. That's wanted.
- getattr(instance.obj, self.check_name)
+ if self.check_name == '__iter__':
+ # Python iterators are a bit strange, because there's no need for
+ # the __iter__ function as long as __getitem__ is defined (it will
+ # just start with __getitem__(0)). This is especially true for
+ # Python 2 strings, where `str.__iter__` is not even defined.
+ try:
+ iter(instance.obj)
+ except TypeError:
+ raise AttributeError
+ else:
+ getattr(instance.obj, self.check_name)
return partial(self.func, instance)
-class CompiledObject(Base):
- # comply with the parser
- start_pos = 0, 0
+class CompiledObject(Context):
path = None # modules have this attribute - set it to None.
- used_names = {} # To be consistent with modules.
+ used_names = lambda self: {} # To be consistent with modules.
- def __init__(self, obj, parent=None):
+ def __init__(self, evaluator, obj, parent_context=None, faked_class=None):
+ super(CompiledObject, self).__init__(evaluator, parent_context)
self.obj = obj
- self.parent = parent
+ # This attribute will not be set for most classes, except for fakes.
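+ # Editor's note (hedged): jedi ships parsed stand-ins for some
+ # builtins (see jedi/evaluate/compiled/fake in this patch), so their
+ # parameters and docstrings can be inspected like ordinary tree nodes.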
+ self.tree_node = faked_class
- @property
- def py__call__(self):
- def actual(evaluator, params):
- if inspect.isclass(self.obj):
- from jedi.evaluate.representation import Instance
- return [Instance(evaluator, self, params)]
- else:
- return list(self._execute_function(evaluator, params))
+ def get_root_node(self):
+ # To make things a bit easier with filters we add this method here.
+ return self.get_root_context()
- # Might raise an AttributeError, which is intentional.
- self.obj.__call__
- return actual
+ @CheckAttribute
+ def py__call__(self, params):
+ if inspect.isclass(self.obj):
+ from jedi.evaluate.context import CompiledInstance
+ return ContextSet(CompiledInstance(self.evaluator, self.parent_context, self, params))
+ else:
+ return ContextSet.from_iterable(self._execute_function(params))
@CheckAttribute
- def py__class__(self, evaluator):
- return CompiledObject(self.obj.__class__, parent=self.parent)
+ def py__class__(self):
+ return create(self.evaluator, self.obj.__class__)
@CheckAttribute
- def py__mro__(self, evaluator):
- return tuple(create(evaluator, cls, self.parent) for cls in self.obj.__mro__)
+ def py__mro__(self):
+ return (self,) + tuple(create(self.evaluator, cls) for cls in self.obj.__mro__[1:])
@CheckAttribute
- def py__bases__(self, evaluator):
- return tuple(create(evaluator, cls) for cls in self.obj.__bases__)
+ def py__bases__(self):
+ return tuple(create(self.evaluator, cls) for cls in self.obj.__bases__)
def py__bool__(self):
return bool(self.obj)
def py__file__(self):
- return self.obj.__file__
+ try:
+ return self.obj.__file__
+ except AttributeError:
+ return None
def is_class(self):
return inspect.isclass(self.obj)
- @property
- def doc(self):
+ def py__doc__(self, include_call_signature=False):
return inspect.getdoc(self.obj) or ''
- @property
- def params(self):
- params_str, ret = self._parse_function_doc()
- tokens = params_str.split(',')
- if inspect.ismethoddescriptor(self._cls().obj):
- tokens.insert(0, 'self')
- params = []
- for p in tokens:
- parts = [FakeName(part) for part in p.strip().split('=')]
- if len(parts) > 1:
- parts.insert(1, Operator(zero_position_modifier, '=', (0, 0)))
- params.append(Param(parts, self))
- return params
+ def get_param_names(self):
+ obj = self.obj
+ try:
+ if py_version < 33:
+ raise ValueError("inspect.signature was introduced in 3.3")
+ if py_version == 34:
+ # In 3.4 inspect.signature is wrong for str and int. This has
+ # been fixed in 3.5. The signature of object is returned,
+ # because no signature was found for str. Here we imitate 3.5
+ # logic and just ignore the signature if the magic methods
+ # don't match object.
+ # 3.3 doesn't even have the logic and returns nothing for str
+ # and classes that inherit from object.
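+ # Editor's illustration (assumed example, not from this patch): on
+ # 3.4, `inspect.signature(str)` reports object's signature instead
+ # of failing, so such classes are rejected below and the
+ # docstring-based parameter parsing is used instead.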
+ user_def = inspect._signature_get_user_defined_method + if (inspect.isclass(obj) + and not user_def(type(obj), '__init__') + and not user_def(type(obj), '__new__') + and (obj.__init__ != object.__init__ + or obj.__new__ != object.__new__)): + raise ValueError + + signature = inspect.signature(obj) + except ValueError: # Has no signature + params_str, ret = self._parse_function_doc() + tokens = params_str.split(',') + if inspect.ismethoddescriptor(obj): + tokens.insert(0, 'self') + for p in tokens: + parts = p.strip().split('=') + yield UnresolvableParamName(self, parts[0]) + else: + for signature_param in signature.parameters.values(): + yield SignatureParamName(self, signature_param) def __repr__(self): - return '<%s: %s>' % (type(self).__name__, repr(self.obj)) + return '<%s: %s>' % (self.__class__.__name__, repr(self.obj)) @underscore_memoization def _parse_function_doc(self): - if self.doc is None: + doc = self.py__doc__() + if doc is None: return '', '' - return _parse_function_doc(self.doc) + return _parse_function_doc(doc) + @property def api_type(self): - if fake.is_class_instance(self.obj): - return 'instance' - - cls = self._cls().obj - if inspect.isclass(cls): + obj = self.obj + if inspect.isclass(obj): return 'class' - elif inspect.ismodule(cls): + elif inspect.ismodule(obj): return 'module' - elif inspect.isbuiltin(cls) or inspect.ismethod(cls) \ - or inspect.ismethoddescriptor(cls): + elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \ + or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj): return 'function' + # Everything else... + return 'instance' @property def type(self): """Imitate the tree.Node.type values.""" - cls = self._cls().obj + cls = self._get_class() if inspect.isclass(cls): return 'classdef' elif inspect.ismodule(cls): return 'file_input' - elif inspect.isbuiltin(cls) or inspect.ismethod(cls) \ - or inspect.ismethoddescriptor(cls): + elif inspect.isbuiltin(cls) or inspect.ismethod(cls) or \ + inspect.ismethoddescriptor(cls): return 'funcdef' @underscore_memoization def _cls(self): + """ + We used to limit the lookups for instantiated objects like list(), but + this is not the case anymore. Python itself + """ # Ensures that a CompiledObject is returned that is not an instance (like list) - if fake.is_class_instance(self.obj): - try: - c = self.obj.__class__ - except AttributeError: - # happens with numpy.core.umath._UFUNC_API (you get it - # automatically by doing `import numpy`. - c = type(None) - return CompiledObject(c, self.parent) return self - @property - def names_dict(self): - # For compatibility with `representation.Class`. - return self.names_dicts(False)[0] + def _get_class(self): + if not fake.is_class_instance(self.obj) or \ + inspect.ismethoddescriptor(self.obj): # slots + return self.obj + + try: + return self.obj.__class__ + except AttributeError: + # happens with numpy.core.umath._UFUNC_API (you get it + # automatically by doing `import numpy`. + return type - def names_dicts(self, search_global, is_instance=False): - return self._names_dict_ensure_one_dict(is_instance) + def get_filters(self, search_global=False, is_instance=False, + until_position=None, origin_scope=None): + yield self._ensure_one_filter(is_instance) @memoize_method - def _names_dict_ensure_one_dict(self, is_instance): + def _ensure_one_filter(self, is_instance): """ search_global shouldn't change the fact that there's one dict, this way there's only one `object`. 
""" - return [LazyNamesDict(self._cls(), is_instance)] + return CompiledObjectFilter(self.evaluator, self, is_instance) - def get_subscope_by_name(self, name): - if name in dir(self._cls().obj): - return CompiledName(self._cls(), name).parent - else: - raise KeyError("CompiledObject doesn't have an attribute '%s'." % name) + @CheckAttribute + def py__getitem__(self, index): + if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): + # Get rid of side effects, we won't call custom `__getitem__`s. + return ContextSet() - def get_index_types(self, evaluator, index_array=()): - # If the object doesn't have `__getitem__`, just raise the - # AttributeError. - if not hasattr(self.obj, '__getitem__'): - debug.warning('Tried to call __getitem__ on non-iterable.') - return [] + return ContextSet(create(self.evaluator, self.obj[index])) + + @CheckAttribute + def py__iter__(self): if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): # Get rid of side effects, we won't call custom `__getitem__`s. - return [] + return - result = [] - from jedi.evaluate.iterable import create_indexes_or_slices - for typ in create_indexes_or_slices(evaluator, index_array): - index = None - try: - index = typ.obj - new = self.obj[index] - except (KeyError, IndexError, TypeError, AttributeError): - # Just try, we don't care if it fails, except for slices. - if isinstance(index, slice): - result.append(self) - else: - result.append(CompiledObject(new)) - if not result: - try: - for obj in self.obj: - result.append(CompiledObject(obj)) - except TypeError: - pass # self.obj maynot have an __iter__ method. - return result + for i, part in enumerate(self.obj): + if i > 20: + # Should not go crazy with large iterators + break + yield LazyKnownContext(create(self.evaluator, part)) + + def py__name__(self): + try: + return self._get_class().__name__ + except AttributeError: + return None @property def name(self): - # might not exist sometimes (raises AttributeError) - return FakeName(self._cls().obj.__name__, self) + try: + name = self._get_class().__name__ + except AttributeError: + name = repr(self.obj) + return CompiledContextName(self, name) - def _execute_function(self, evaluator, params): + def _execute_function(self, params): + from jedi.evaluate import docstrings if self.type != 'funcdef': return - for name in self._parse_function_doc()[1].split(): try: - bltn_obj = _create_from_name(builtin, builtin, name) + bltn_obj = getattr(_builtins, name) except AttributeError: continue else: - if isinstance(bltn_obj, CompiledObject) and bltn_obj.obj is None: - # We want everything except None. + if bltn_obj is None: + # We want to evaluate everything except None. + # TODO do we? continue - for result in evaluator.execute(bltn_obj, params): + bltn_obj = create(self.evaluator, bltn_obj) + for result in bltn_obj.execute(params): yield result - - @property - @underscore_memoization - def subscopes(self): - """ - Returns only the faked scopes - the other ones are not important for - internal analysis. 
- """ - module = self.get_parent_until() - faked_subscopes = [] - for name in dir(self._cls().obj): - f = fake.get_faked(module.obj, self.obj, name) - if f: - f.parent = self - faked_subscopes.append(f) - return faked_subscopes - - def is_scope(self): - return True + for type_ in docstrings.infer_return_types(self): + yield type_ def get_self_attributes(self): return [] # Instance compatibility @@ -245,79 +278,142 @@ def get_self_attributes(self): def get_imports(self): return [] # Builtins don't have imports + def dict_values(self): + return ContextSet.from_iterable( + create(self.evaluator, v) for v in self.obj.values() + ) -class LazyNamesDict(object): - """ - A names_dict instance for compiled objects, resembles the parser.tree. - """ - def __init__(self, compiled_obj, is_instance): - self._compiled_obj = compiled_obj - self._is_instance = is_instance - def __iter__(self): - return (v[0].value for v in self.values()) +class CompiledName(AbstractNameDefinition): + def __init__(self, evaluator, parent_context, name): + self._evaluator = evaluator + self.parent_context = parent_context + self.string_name = name - @memoize_method - def __getitem__(self, name): + def __repr__(self): try: - getattr(self._compiled_obj.obj, name) + name = self.parent_context.name # __name__ is not defined all the time except AttributeError: - raise KeyError('%s in %s not found.' % (name, self._compiled_obj)) - return [CompiledName(self._compiled_obj, name)] + name = None + return '<%s: (%s).%s>' % (self.__class__.__name__, name, self.string_name) - def values(self): - obj = self._compiled_obj.obj + @property + def api_type(self): + return next(iter(self.infer())).api_type - values = [] - for name in dir(obj): - try: - values.append(self[name]) - except KeyError: - # The dir function can be wrong. - pass + @underscore_memoization + def infer(self): + module = self.parent_context.get_root_context() + return ContextSet(_create_from_name( + self._evaluator, module, self.parent_context, self.string_name + )) - # dir doesn't include the type names. - if not inspect.ismodule(obj) and obj != type and not self._is_instance: - values += _type_names_dict.values() - return values +class SignatureParamName(AbstractNameDefinition): + api_type = 'param' -class CompiledName(FakeName): - def __init__(self, obj, name): - super(CompiledName, self).__init__(name) - self._obj = obj - self.name = name + def __init__(self, compiled_obj, signature_param): + self.parent_context = compiled_obj.parent_context + self._signature_param = signature_param - def __repr__(self): + @property + def string_name(self): + return self._signature_param.name + + def infer(self): + p = self._signature_param + evaluator = self.parent_context.evaluator + contexts = ContextSet() + if p.default is not p.empty: + contexts = ContextSet(create(evaluator, p.default)) + if p.annotation is not p.empty: + annotation = create(evaluator, p.annotation) + contexts |= annotation.execute_evaluated() + return contexts + + +class UnresolvableParamName(AbstractNameDefinition): + api_type = 'param' + + def __init__(self, compiled_obj, name): + self.parent_context = compiled_obj.parent_context + self.string_name = name + + def infer(self): + return ContextSet() + + +class CompiledContextName(ContextNameMixin, AbstractNameDefinition): + def __init__(self, context, name): + self.string_name = name + self._context = context + self.parent_context = context.parent_context + + +class EmptyCompiledName(AbstractNameDefinition): + """ + Accessing some names will raise an exception. 
To avoid not having any
+    completions, just give Jedi the option to return this object. It infers to
+    nothing.
+    """
+    def __init__(self, evaluator, name):
+        self.parent_context = evaluator.BUILTINS
+        self.string_name = name
+
+    def infer(self):
+        return ContextSet()
+
+
+class CompiledObjectFilter(AbstractFilter):
+    name_class = CompiledName
+
+    def __init__(self, evaluator, compiled_object, is_instance=False):
+        self._evaluator = evaluator
+        self._compiled_object = compiled_object
+        self._is_instance = is_instance
+
+    @memoize_method
+    def get(self, name):
+        name = str(name)
+        obj = self._compiled_object.obj
         try:
-            name = self._obj.name  # __name__ is not defined all the time
+            attr, is_get_descriptor = getattr_static(obj, name)
         except AttributeError:
-            name = None
-        return '<%s: (%s).%s>' % (type(self).__name__, name, self.name)
+            return []
+        else:
+            if is_get_descriptor \
+                    and not type(attr) in ALLOWED_DESCRIPTOR_ACCESS:
+                # In case of descriptors that have get methods we cannot return
+                # its value, because that would mean code execution.
+                return [EmptyCompiledName(self._evaluator, name)]
+        if self._is_instance and name not in dir(obj):
+            return []
+        return [self._create_name(name)]

-    def is_definition(self):
-        return True
+    def values(self):
+        obj = self._compiled_object.obj

-    @property
-    @underscore_memoization
-    def parent(self):
-        module = self._obj.get_parent_until()
-        return _create_from_name(module, self._obj, self.name)
+        names = []
+        for name in dir(obj):
+            names += self.get(name)

-    @parent.setter
-    def parent(self, value):
-        pass  # Just ignore this, FakeName tries to overwrite the parent attribute.
+        is_instance = self._is_instance or fake.is_class_instance(obj)
+        # ``dir`` doesn't include the type names.
+        if not inspect.ismodule(obj) and (obj is not type) and not is_instance:
+            for filter in create(self._evaluator, type).get_filters():
+                names += filter.values()
+        return names

+    def _create_name(self, name):
+        return self.name_class(self._evaluator, self._compiled_object, name)

-def dotted_from_fs_path(fs_path, sys_path=None):
+
+def dotted_from_fs_path(fs_path, sys_path):
     """
     Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`.  I.e.
     compares the path with sys.path and then returns the dotted_path. If the
     path is not in the sys.path, just returns None.
     """
-    if sys_path is None:
-        sys_path = get_sys_path()
-
     if os.path.basename(fs_path).startswith('__init__.'):
         # We are calculating the path. __init__ files are not interesting.
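        # (Illustrative note) e.g. an `email/__init__.py` path is reduced to
        # its directory here, so the sys.path comparison below yields
        # `email` rather than `email.__init__`.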
        fs_path = os.path.dirname(fs_path)
@@ -338,20 +434,22 @@ def dotted_from_fs_path(fs_path, sys_path=None):
     for s in sys_path:
         if (fs_path.startswith(s) and len(path) < len(s)):
             path = s
-    return _path_re.sub('', fs_path[len(path):].lstrip(os.path.sep)).replace(os.path.sep, '.')
+    # - Windows
+    # X:\path\to\lib-dynload/datetime.pyd => datetime
+    module_path = fs_path[len(path):].lstrip(os.path.sep).lstrip('/')
+    # - Windows
+    # Replace like X:\path\to\something/foo/bar.py
+    return _path_re.sub('', module_path).replace(os.path.sep, '.').replace('/', '.')

-def load_module(path=None, name=None):
+
+def load_module(evaluator, path=None, name=None):
+    sys_path = list(evaluator.project.sys_path)
     if path is not None:
-        dotted_path = dotted_from_fs_path(path)
+        dotted_path = dotted_from_fs_path(path, sys_path=sys_path)
     else:
         dotted_path = name

-    sys_path = get_sys_path()
-    if dotted_path is None:
-        p, _, dotted_path = path.partition(os.path.sep)
-        sys_path.insert(0, p)
-
     temp, sys.path = sys.path, sys_path
     try:
         __import__(dotted_path)
@@ -364,7 +462,7 @@ def load_module(path=None, name=None):
         raise
     except ImportError:
         # If a module is "corrupt" or not really a Python module or whatever.
-        debug.warning('Module %s not importable.', path)
+        debug.warning('Module %s not importable in path %s.', dotted_path, path)
         return None
     finally:
         sys.path = temp
@@ -373,7 +471,7 @@ def load_module(path=None, name=None):
     # complicated import structure of Python.
     module = sys.modules[dotted_path]

-    return CompiledObject(module)
+    return create(evaluator, module)

 docstr_defaults = {
@@ -445,10 +543,30 @@ def change_options(m):
     return param_str, ret

-class Builtin(CompiledObject):
-    @memoize_method
-    def get_by_name(self, name):
-        return self.names_dict[name][0].parent
+def _create_from_name(evaluator, module, compiled_object, name):
+    obj = compiled_object.obj
+    faked = None
+    try:
+        faked = fake.get_faked(evaluator, module, obj, parent_context=compiled_object, name=name)
+        if faked.type == 'funcdef':
+            from jedi.evaluate.context.function import FunctionContext
+            return FunctionContext(evaluator, compiled_object, faked)
+    except fake.FakeDoesNotExist:
+        pass
+
+    try:
+        obj = getattr(obj, name)
+    except AttributeError:
+        # Happens e.g. in properties of
+        # PyQt4.QtGui.QStyleOptionComboBox.currentText
+        # -> just set it to None
+        obj = None
+    return create(evaluator, obj, parent_context=compiled_object, faked=faked)
+
+
+def builtin_from_name(evaluator, string):
+    bltn_obj = getattr(_builtins, string)
+    return create(evaluator, bltn_obj)

 def _a_generator(foo):
@@ -457,75 +575,64 @@ def _a_generator(foo):
     yield foo

-def _create_from_name(module, parent, name):
-    faked = fake.get_faked(module.obj, parent.obj, name)
-    # only functions are necessary.
-    if faked is not None:
-        faked.parent = parent
-        return faked
+_SPECIAL_OBJECTS = {
+    'FUNCTION_CLASS': type(load_module),
+    'METHOD_CLASS': type(CompiledObject.is_class),
+    'MODULE_CLASS': type(os),
+    'GENERATOR_OBJECT': _a_generator(1.0),
+    'BUILTINS': _builtins,
+}

-    try:
-        obj = getattr(parent.obj, name)
-    except AttributeError:
-        # happens e.g.
in properties of
-        # PyQt4.QtGui.QStyleOptionComboBox.currentText
-        # -> just set it to None
-        obj = None
-    return CompiledObject(obj, parent)
-
-
-builtin = Builtin(_builtins)
-magic_function_class = CompiledObject(type(load_module), parent=builtin)
-generator_obj = CompiledObject(_a_generator(1.0))
-_type_names_dict = builtin.get_by_name('type').names_dict
-none_obj = builtin.get_by_name('None')
-false_obj = builtin.get_by_name('False')
-true_obj = builtin.get_by_name('True')
-object_obj = builtin.get_by_name('object')
-
-
-def keyword_from_value(obj):
-    if obj is None:
-        return none_obj
-    elif obj is False:
-        return false_obj
-    elif obj is True:
-        return true_obj
-    else:
-        raise NotImplementedError

+def get_special_object(evaluator, identifier):
+    obj = _SPECIAL_OBJECTS[identifier]
+    return create(evaluator, obj, parent_context=create(evaluator, _builtins))

-def compiled_objects_cache(func):
-    def wrapper(evaluator, obj, parent=builtin, module=None):
-        # Do a very cheap form of caching here.
-        key = id(obj), id(parent), id(module)
-        try:
-            return evaluator.compiled_cache[key][0]
-        except KeyError:
-            result = func(evaluator, obj, parent, module)
-            # Need to cache all of them, otherwise the id could be overwritten.
-            evaluator.compiled_cache[key] = result, obj, parent, module
-            return result
-    return wrapper
+def compiled_objects_cache(attribute_name):
+    def decorator(func):
+        """
+        This decorator caches just the ids, as opposed to caching the object
+        itself. Caching the id has the advantage that an object doesn't need
+        to be hashable.
+        """
+        def wrapper(evaluator, obj, parent_context=None, module=None, faked=None):
+            cache = getattr(evaluator, attribute_name)
+            # Do a very cheap form of caching here.
+            key = id(obj), id(parent_context)
+            try:
+                return cache[key][0]
+            except KeyError:
+                # TODO this whole decorator is way too ugly
+                result = func(evaluator, obj, parent_context, module, faked)
+                # Need to cache all of them, otherwise the id could be overwritten.
+                cache[key] = result, obj, parent_context, module, faked
+                return result
+        return wrapper
+
+    return decorator

-@compiled_objects_cache
-def create(evaluator, obj, parent=builtin, module=None):
+
+@compiled_objects_cache('compiled_cache')
+def create(evaluator, obj, parent_context=None, module=None, faked=None):
     """
     A very weird interface class to this module. The more options provided
     the more accurate loading compiled objects is.
     """
+    if inspect.ismodule(obj):
+        if parent_context is not None:
+            # Modules don't have parents, be careful with caching: recurse.
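+            # (Illustrative note) The recursive call drops parent_context,
+            # so it hits the cache key (id(obj), id(None)) from the decorator
+            # above; every module is therefore cached exactly once, no matter
+            # through which attribute chain it was reached.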
+ return create(evaluator, obj) + else: + if parent_context is None and obj is not _builtins: + return create(evaluator, obj, create(evaluator, _builtins)) - if not inspect.ismodule(obj): - faked = fake.get_faked(module and module.obj, obj) - if faked is not None: - faked.parent = parent - return faked - - try: - if parent == builtin and obj.__module__ in ('builtins', '__builtin__'): - return builtin.get_by_name(obj.__name__) - except AttributeError: - pass - - return CompiledObject(obj, parent) + try: + faked = fake.get_faked(evaluator, module, obj, parent_context=parent_context) + if faked.type == 'funcdef': + from jedi.evaluate.context.function import FunctionContext + return FunctionContext(evaluator, parent_context, faked) + except fake.FakeDoesNotExist: + pass + + return CompiledObject(evaluator, obj, parent_context, faked) diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake.py b/pythonFiles/release/jedi/evaluate/compiled/fake.py index 0037cfc77a80..60dbefe4acac 100755 --- a/pythonFiles/release/jedi/evaluate/compiled/fake.py +++ b/pythonFiles/release/jedi/evaluate/compiled/fake.py @@ -6,16 +6,48 @@ import os import inspect +import types +from itertools import chain -from jedi._compatibility import is_py3, builtins, unicode -from jedi.parser import Parser, load_grammar -from jedi.parser import tree as pt -from jedi.evaluate.helpers import FakeName +from parso.python import tree + +from jedi._compatibility import is_py3, builtins, unicode, is_py34 modules = {} -def _load_faked_module(module): +MethodDescriptorType = type(str.replace) +# These are not considered classes and access is granted even though they have +# a __class__ attribute. +NOT_CLASS_TYPES = ( + types.BuiltinFunctionType, + types.CodeType, + types.FrameType, + types.FunctionType, + types.GeneratorType, + types.GetSetDescriptorType, + types.LambdaType, + types.MemberDescriptorType, + types.MethodType, + types.ModuleType, + types.TracebackType, + MethodDescriptorType +) + +if is_py3: + NOT_CLASS_TYPES += ( + types.MappingProxyType, + types.SimpleNamespace + ) + if is_py34: + NOT_CLASS_TYPES += (types.DynamicClassAttribute,) + + +class FakeDoesNotExist(Exception): + pass + + +def _load_faked_module(grammar, module): module_name = module.__name__ if module_name == '__builtin__' and not is_py3: module_name = 'builtins' @@ -30,23 +62,21 @@ def _load_faked_module(module): except IOError: modules[module_name] = None return - grammar = load_grammar('grammar3.4') - module = Parser(grammar, unicode(source), module_name).module - modules[module_name] = module + modules[module_name] = m = grammar.parse(unicode(source)) if module_name == 'builtins' and not is_py3: # There are two implementations of `open` for either python 2/3. # -> Rename the python2 version (`look at fake/builtins.pym`). 
- open_func = search_scope(module, 'open') - open_func.children[1] = FakeName('open_python3') - open_func = search_scope(module, 'open_python2') - open_func.children[1] = FakeName('open') - return module + open_func = _search_scope(m, 'open') + open_func.children[1].value = 'open_python3' + open_func = _search_scope(m, 'open_python2') + open_func.children[1].value = 'open' + return m -def search_scope(scope, obj_name): - for s in scope.subscopes: - if str(s.name) == obj_name: +def _search_scope(scope, obj_name): + for s in chain(scope.iter_classdefs(), scope.iter_funcdefs()): + if s.name.value == obj_name: return s @@ -64,60 +94,120 @@ def get_module(obj): # Unfortunately in some cases like `int` there's no __module__ return builtins else: - return __import__(imp_plz) + if imp_plz is None: + # Happens for example in `(_ for _ in []).send.__module__`. + return builtins + else: + try: + return __import__(imp_plz) + except ImportError: + # __module__ can be something arbitrary that doesn't exist. + return builtins -def _faked(module, obj, name): +def _faked(grammar, module, obj, name): # Crazy underscore actions to try to escape all the internal madness. if module is None: module = get_module(obj) - faked_mod = _load_faked_module(module) + faked_mod = _load_faked_module(grammar, module) if faked_mod is None: - return + return None, None - # Having the module as a `parser.representation.module`, we need to scan + # Having the module as a `parser.python.tree.Module`, we need to scan # for methods. if name is None: - if inspect.isbuiltin(obj): - return search_scope(faked_mod, obj.__name__) + if inspect.isbuiltin(obj) or inspect.isclass(obj): + return _search_scope(faked_mod, obj.__name__), faked_mod elif not inspect.isclass(obj): # object is a method or descriptor - cls = search_scope(faked_mod, obj.__objclass__.__name__) - if cls is None: - return - return search_scope(cls, obj.__name__) + try: + objclass = obj.__objclass__ + except AttributeError: + return None, None + else: + cls = _search_scope(faked_mod, objclass.__name__) + if cls is None: + return None, None + return _search_scope(cls, obj.__name__), faked_mod else: - if obj == module: - return search_scope(faked_mod, name) + if obj is module: + return _search_scope(faked_mod, name), faked_mod else: - cls = search_scope(faked_mod, obj.__name__) + try: + cls_name = obj.__name__ + except AttributeError: + return None, None + cls = _search_scope(faked_mod, cls_name) if cls is None: - return - return search_scope(cls, name) + return None, None + return _search_scope(cls, name), faked_mod + return None, None + +def memoize_faked(obj): + """ + A typical memoize function that ignores issues with non hashable results. + """ + cache = obj.cache = {} + + def memoizer(*args, **kwargs): + key = (obj, args, frozenset(kwargs.items())) + try: + result = cache[key] + except (TypeError, ValueError): + return obj(*args, **kwargs) + except KeyError: + result = obj(*args, **kwargs) + if result is not None: + cache[key] = obj(*args, **kwargs) + return result + else: + return result + return memoizer -def get_faked(module, obj, name=None): - obj = obj.__class__ if is_class_instance(obj) else obj - result = _faked(module, obj, name) - if result is None or isinstance(result, pt.Class): + +@memoize_faked +def _get_faked(grammar, module, obj, name=None): + result, fake_module = _faked(grammar, module, obj, name) + if result is None: # We're not interested in classes. What we want is functions. 
- return None + raise FakeDoesNotExist + elif result.type == 'classdef': + return result, fake_module else: # Set the docstr which was previously not set (faked modules don't # contain it). + assert result.type == 'funcdef' doc = '"""%s"""' % obj.__doc__ # TODO need escapes. suite = result.children[-1] - string = pt.String(pt.zero_position_modifier, doc, (0, 0), '') - new_line = pt.Whitespace('\n', (0, 0), '') - docstr_node = pt.Node('simple_stmt', [string, new_line]) - suite.children.insert(2, docstr_node) - return result + string = tree.String(doc, (0, 0), '') + new_line = tree.Newline('\n', (0, 0)) + docstr_node = tree.PythonNode('simple_stmt', [string, new_line]) + suite.children.insert(1, docstr_node) + return result, fake_module + + +def get_faked(evaluator, module, obj, name=None, parent_context=None): + if parent_context and parent_context.tree_node is not None: + # Try to search in already clearly defined stuff. + found = _search_scope(parent_context.tree_node, name) + if found is not None: + return found + else: + raise FakeDoesNotExist + + faked, fake_module = _get_faked(evaluator.latest_grammar, module and module.obj, obj, name) + if module is not None: + module.get_used_names = fake_module.get_used_names + return faked def is_class_instance(obj): """Like inspect.* methods.""" - return not (inspect.isclass(obj) or inspect.ismodule(obj) - or inspect.isbuiltin(obj) or inspect.ismethod(obj) - or inspect.ismethoddescriptor(obj) or inspect.iscode(obj) - or inspect.isgenerator(obj)) + try: + cls = obj.__class__ + except AttributeError: + return False + else: + return cls != type and not issubclass(cls, NOT_CLASS_TYPES) diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/_weakref.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/_weakref.pym index 8d21a2c4a7c6..298d0b0dba88 100755 --- a/pythonFiles/release/jedi/evaluate/compiled/fake/_weakref.pym +++ b/pythonFiles/release/jedi/evaluate/compiled/fake/_weakref.pym @@ -1,8 +1,9 @@ def proxy(object, callback=None): return object -class weakref(): +class ref(): def __init__(self, object, callback=None): self.__object = object + def __call__(self): return self.__object diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/builtins.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/builtins.pym index 1283de00317b..1225929c2cb7 100755 --- a/pythonFiles/release/jedi/evaluate/compiled/fake/builtins.pym +++ b/pythonFiles/release/jedi/evaluate/compiled/fake/builtins.pym @@ -32,9 +32,16 @@ def range(start, stop=None, step=1): class file(): def __iter__(self): yield '' + def next(self): return '' + def readlines(self): + return [''] + + def __enter__(self): + return self + class xrange(): # Attention: this function doesn't exist in Py3k (there it is range). 
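These ``.pym`` skeletons are plain Python stand-ins that the evaluator parses
instead of the C implementations. As a hedged illustration (the inferred types
shown are what the stubs above suggest, not verified output), the added
``__enter__``/``readlines`` fakes are what let inference flow through code like:

    with open('data.txt') as f:       # fake __enter__ returns self
        for line in f.readlines():    # fake readlines() -> [''] => line: str
            print(line)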
@@ -124,7 +131,7 @@ class list(): return self.__iterable[y] def pop(self): - return self.__iterable[-1] + return self.__iterable[int()] class tuple(): @@ -202,11 +209,29 @@ class dict(): except KeyError: return d + def values(self): + return self.__elements.values() + def setdefault(self, k, d): # TODO maybe also return the content return d +class enumerate(): + def __init__(self, sequence, start=0): + self.__sequence = sequence + + def __iter__(self): + for i in self.__sequence: + yield 1, i + + def __next__(self): + return next(self.__iter__()) + + def next(self): + return next(self.__iter__()) + + class reversed(): def __init__(self, sequence): self.__sequence = sequence @@ -238,6 +263,11 @@ class str(): def __init__(self, obj): pass + def strip(self): + return str() + + def split(self): + return [str()] class type(): def mro(): diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/io.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/io.pym index 87b02eed4321..c1f4fc011662 100755 --- a/pythonFiles/release/jedi/evaluate/compiled/fake/io.pym +++ b/pythonFiles/release/jedi/evaluate/compiled/fake/io.pym @@ -4,3 +4,9 @@ class TextIOWrapper(): def __iter__(self): yield str() + + def readlines(self): + return [''] + + def __enter__(self): + return self diff --git a/pythonFiles/release/jedi/evaluate/compiled/fake/operator.pym b/pythonFiles/release/jedi/evaluate/compiled/fake/operator.pym new file mode 100644 index 000000000000..d40d468179aa --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/compiled/fake/operator.pym @@ -0,0 +1,33 @@ +# Just copied this code from Python 3.6. + +class itemgetter: + """ + Return a callable object that fetches the given item(s) from its operand. + After f = itemgetter(2), the call f(r) returns r[2]. + After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) + """ + __slots__ = ('_items', '_call') + + def __init__(self, item, *items): + if not items: + self._items = (item,) + def func(obj): + return obj[item] + self._call = func + else: + self._items = items = (item,) + items + def func(obj): + return tuple(obj[i] for i in items) + self._call = func + + def __call__(self, obj): + return self._call(obj) + + def __repr__(self): + return '%s.%s(%s)' % (self.__class__.__module__, + self.__class__.__name__, + ', '.join(map(repr, self._items))) + + def __reduce__(self): + return self.__class__, self._items + diff --git a/pythonFiles/release/jedi/evaluate/compiled/getattr_static.py b/pythonFiles/release/jedi/evaluate/compiled/getattr_static.py new file mode 100644 index 000000000000..9f8cd8a838cf --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/compiled/getattr_static.py @@ -0,0 +1,175 @@ +""" +A static version of getattr. +This is a backport of the Python 3 code with a little bit of additional +information returned to enable Jedi to make decisions. 
+""" + +import types + +from jedi._compatibility import py_version + +_sentinel = object() + +def _check_instance(obj, attr): + instance_dict = {} + try: + instance_dict = object.__getattribute__(obj, "__dict__") + except AttributeError: + pass + return dict.get(instance_dict, attr, _sentinel) + + +def _check_class(klass, attr): + for entry in _static_getmro(klass): + if _shadowed_dict(type(entry)) is _sentinel: + try: + return entry.__dict__[attr] + except KeyError: + pass + return _sentinel + +def _is_type(obj): + try: + _static_getmro(obj) + except TypeError: + return False + return True + + +def _shadowed_dict_newstyle(klass): + dict_attr = type.__dict__["__dict__"] + for entry in _static_getmro(klass): + try: + class_dict = dict_attr.__get__(entry)["__dict__"] + except KeyError: + pass + else: + if not (type(class_dict) is types.GetSetDescriptorType and + class_dict.__name__ == "__dict__" and + class_dict.__objclass__ is entry): + return class_dict + return _sentinel + + +def _static_getmro_newstyle(klass): + return type.__dict__['__mro__'].__get__(klass) + + +if py_version >= 30: + _shadowed_dict = _shadowed_dict_newstyle + _get_type = type + _static_getmro = _static_getmro_newstyle +else: + def _shadowed_dict(klass): + """ + In Python 2 __dict__ is not overwritable: + + class Foo(object): pass + setattr(Foo, '__dict__', 4) + + Traceback (most recent call last): + File "", line 1, in + TypeError: __dict__ must be a dictionary object + + It applies to both newstyle and oldstyle classes: + + class Foo(object): pass + setattr(Foo, '__dict__', 4) + Traceback (most recent call last): + File "", line 1, in + AttributeError: attribute '__dict__' of 'type' objects is not writable + + It also applies to instances of those objects. However to keep things + straight forward, newstyle classes always use the complicated way of + accessing it while oldstyle classes just use getattr. + """ + if type(klass) is _oldstyle_class_type: + return getattr(klass, '__dict__', _sentinel) + return _shadowed_dict_newstyle(klass) + + class _OldStyleClass(): + pass + + _oldstyle_instance_type = type(_OldStyleClass()) + _oldstyle_class_type = type(_OldStyleClass) + + def _get_type(obj): + type_ = object.__getattribute__(obj, '__class__') + if type_ is _oldstyle_instance_type: + # Somehow for old style classes we need to access it directly. + return obj.__class__ + return type_ + + def _static_getmro(klass): + if type(klass) is _oldstyle_class_type: + def oldstyle_mro(klass): + """ + Oldstyle mro is a really simplistic way of look up mro: + https://stackoverflow.com/questions/54867/what-is-the-difference-between-old-style-and-new-style-classes-in-python + """ + yield klass + for base in klass.__bases__: + for yield_from in oldstyle_mro(base): + yield yield_from + + return oldstyle_mro(klass) + + return _static_getmro_newstyle(klass) + + +def _safe_hasattr(obj, name): + return _check_class(_get_type(obj), name) is not _sentinel + + +def _safe_is_data_descriptor(obj): + return (_safe_hasattr(obj, '__set__') or _safe_hasattr(obj, '__delete__')) + + +def getattr_static(obj, attr, default=_sentinel): + """Retrieve attributes without triggering dynamic lookup via the + descriptor protocol, __getattr__ or __getattribute__. + + Note: this function may not be able to retrieve all attributes + that getattr can fetch (like dynamically created attributes) + and may find attributes that getattr can't (like descriptors + that raise AttributeError). 
It can also return descriptor objects
+       instead of instance members in some cases. See the
+       documentation for details.
+
+       Returns a tuple `(attr, is_get_descriptor)`. is_get_descriptor means
+       that the attribute is a descriptor that has a `__get__` attribute.
+    """
+    instance_result = _sentinel
+    if not _is_type(obj):
+        klass = _get_type(obj)
+        dict_attr = _shadowed_dict(klass)
+        if (dict_attr is _sentinel or
+                type(dict_attr) is types.MemberDescriptorType):
+            instance_result = _check_instance(obj, attr)
+    else:
+        klass = obj
+
+    klass_result = _check_class(klass, attr)
+
+    if instance_result is not _sentinel and klass_result is not _sentinel:
+        if _safe_hasattr(klass_result, '__get__') \
+                and _safe_is_data_descriptor(klass_result):
+            # A get/set descriptor has priority over everything.
+            return klass_result, True
+
+    if instance_result is not _sentinel:
+        return instance_result, False
+    if klass_result is not _sentinel:
+        return klass_result, _safe_hasattr(klass_result, '__get__')
+
+    if obj is klass:
+        # for types we check the metaclass too
+        for entry in _static_getmro(type(klass)):
+            if _shadowed_dict(type(entry)) is _sentinel:
+                try:
+                    return entry.__dict__[attr], False
+                except KeyError:
+                    pass
+    if default is not _sentinel:
+        return default, False
+    raise AttributeError(attr)
diff --git a/pythonFiles/release/jedi/evaluate/compiled/mixed.py b/pythonFiles/release/jedi/evaluate/compiled/mixed.py
new file mode 100644
index 000000000000..ac0f6dd614db
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/compiled/mixed.py
@@ -0,0 +1,231 @@
+"""
+Used only for REPL Completion.
+"""
+
+import inspect
+import os
+
+from jedi import settings
+from jedi.evaluate import compiled
+from jedi.cache import underscore_memoization
+from jedi.evaluate import imports
+from jedi.evaluate.base_context import Context, ContextSet
+from jedi.evaluate.context import ModuleContext
+from jedi.evaluate.cache import evaluator_function_cache
+from jedi.evaluate.compiled.getattr_static import getattr_static
+
+
+class MixedObject(object):
+    """
+    A ``MixedObject`` is used in two ways:
+
+    1. It uses the default logic of ``parser.python.tree`` objects,
+    2. except for getattr calls. The names dicts are generated in a fashion
+       like ``CompiledObject``.
+
+    This combined logic makes it possible to provide more powerful REPL
+    completion. It allows side effects that are not noticeable with the
+    default parser structure to still be completable.
+
+    The biggest difference between CompiledObject and MixedObject is that we
+    are generally dealing with Python code and not with C code. This results
+    in fewer special cases, because in Python you don't have the same freedom
+    to modify the runtime.
+    """
+    def __init__(self, evaluator, parent_context, compiled_object, tree_context):
+        self.evaluator = evaluator
+        self.parent_context = parent_context
+        self.compiled_object = compiled_object
+        self._context = tree_context
+        self.obj = compiled_object.obj
+
+    # We have to overwrite everything that has to do with trailers, name
+    # lookups and filters to make it possible to route name lookups towards
+    # compiled objects and the rest towards tree node contexts.
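+    # (Illustrative note) Roughly: in a REPL, completing `module.obj.<TAB>`
+    # resolves the attribute on the live object (via getattr_static above),
+    # while definitions and docstrings come from the parsed source whenever
+    # it can be located on disk.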
+    def py__getattribute__(*args, **kwargs):
+        return Context.py__getattribute__(*args, **kwargs)
+
+    def get_filters(self, *args, **kwargs):
+        yield MixedObjectFilter(self.evaluator, self)
+
+    def __repr__(self):
+        return '<%s: %s>' % (type(self).__name__, repr(self.obj))
+
+    def __getattr__(self, name):
+        return getattr(self._context, name)
+
+
+class MixedName(compiled.CompiledName):
+    """
+    The ``CompiledName._compiled_object`` is our MixedObject.
+    """
+    @property
+    def start_pos(self):
+        contexts = list(self.infer())
+        if not contexts:
+            # This means a start_pos that doesn't exist (compiled objects).
+            return (0, 0)
+        return contexts[0].name.start_pos
+
+    @start_pos.setter
+    def start_pos(self, value):
+        # Ignore the __init__'s start_pos setter call.
+        pass
+
+    @underscore_memoization
+    def infer(self):
+        obj = self.parent_context.obj
+        try:
+            # TODO use logic from compiled.CompiledObjectFilter
+            obj = getattr(obj, self.string_name)
+        except AttributeError:
+            # Happens e.g. in properties of
+            # PyQt4.QtGui.QStyleOptionComboBox.currentText
+            # -> just set it to None
+            obj = None
+        return ContextSet(
+            _create(self._evaluator, obj, parent_context=self.parent_context)
+        )
+
+    @property
+    def api_type(self):
+        return next(iter(self.infer())).api_type
+
+
+class MixedObjectFilter(compiled.CompiledObjectFilter):
+    name_class = MixedName
+
+    def __init__(self, evaluator, mixed_object, is_instance=False):
+        super(MixedObjectFilter, self).__init__(
+            evaluator, mixed_object, is_instance)
+        self._mixed_object = mixed_object
+
+    #def _create(self, name):
+        #return MixedName(self._evaluator, self._compiled_object, name)
+
+
+@evaluator_function_cache()
+def _load_module(evaluator, path, python_object):
+    module = evaluator.grammar.parse(
+        path=path,
+        cache=True,
+        diff_cache=True,
+        cache_path=settings.cache_directory
+    ).get_root_node()
+    python_module = inspect.getmodule(python_object)
+
+    evaluator.modules[python_module.__name__] = module
+    return module
+
+
+def _get_object_to_check(python_object):
+    """Check if inspect.getfile has a chance to find the source."""
+    if (inspect.ismodule(python_object) or
+            inspect.isclass(python_object) or
+            inspect.ismethod(python_object) or
+            inspect.isfunction(python_object) or
+            inspect.istraceback(python_object) or
+            inspect.isframe(python_object) or
+            inspect.iscode(python_object)):
+        return python_object

+    try:
+        return python_object.__class__
+    except AttributeError:
+        raise TypeError  # Prevents computation of `repr` within inspect.
+
+
+def find_syntax_node_name(evaluator, python_object):
+    try:
+        python_object = _get_object_to_check(python_object)
+        path = inspect.getsourcefile(python_object)
+    except TypeError:
+        # The type might not be known (e.g. class_with_dict.__weakref__)
+        return None, None
+    if path is None or not os.path.exists(path):
+        # The path might not exist or be e.g. <stdin>.
+        return None, None
+
+    module = _load_module(evaluator, path, python_object)
+
+    if inspect.ismodule(python_object):
+        # We don't need to check names for modules, because there's not really
+        # a way to write a module in a module in Python (and also __name__ can
+        # be something like ``email.utils``).
+        return module, path
+
+    try:
+        name_str = python_object.__name__
+    except AttributeError:
+        # Stuff like python_function.__code__.
+        return None, None
+
+    if name_str == '<lambda>':
+        return None, None  # It's too hard to find lambdas.
+
+    # Doesn't always work (e.g.
os.stat_result) + try: + names = module.get_used_names()[name_str] + except KeyError: + return None, None + names = [n for n in names if n.is_definition()] + + try: + code = python_object.__code__ + # By using the line number of a code object we make the lookup in a + # file pretty easy. There's still a possibility of people defining + # stuff like ``a = 3; foo(a); a = 4`` on the same line, but if people + # do so we just don't care. + line_nr = code.co_firstlineno + except AttributeError: + pass + else: + line_names = [name for name in names if name.start_pos[0] == line_nr] + # There's a chance that the object is not available anymore, because + # the code has changed in the background. + if line_names: + return line_names[-1].parent, path + + # It's really hard to actually get the right definition, here as a last + # resort we just return the last one. This chance might lead to odd + # completions at some points but will lead to mostly correct type + # inference, because people tend to define a public name in a module only + # once. + return names[-1].parent, path + + +@compiled.compiled_objects_cache('mixed_cache') +def _create(evaluator, obj, parent_context=None, *args): + tree_node, path = find_syntax_node_name(evaluator, obj) + + compiled_object = compiled.create( + evaluator, obj, parent_context=parent_context.compiled_object) + if tree_node is None: + return compiled_object + + module_node = tree_node.get_root_node() + if parent_context.tree_node.get_root_node() == module_node: + module_context = parent_context.get_root_context() + else: + module_context = ModuleContext(evaluator, module_node, path=path) + # TODO this __name__ is probably wrong. + name = compiled_object.get_root_context().py__name__() + imports.add_module(evaluator, name, module_context) + + tree_context = module_context.create_context( + tree_node, + node_is_context=True, + node_is_object=True + ) + if tree_node.type == 'classdef': + if not inspect.isclass(obj): + # Is an instance, not a class. 
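+            # (Illustrative note) execute_evaluated() "calls" the class
+            # context once, so the resulting tree context models an instance
+            # of that class, matching the runtime object we started from.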
+            tree_context, = tree_context.execute_evaluated()
+
+    return MixedObject(
+        evaluator,
+        parent_context,
+        compiled_object,
+        tree_context=tree_context
+    )
+
diff --git a/pythonFiles/release/jedi/evaluate/context/__init__.py b/pythonFiles/release/jedi/evaluate/context/__init__.py
new file mode 100644
index 000000000000..4e7ce4d6c831
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/context/__init__.py
@@ -0,0 +1,5 @@
+from jedi.evaluate.context.module import ModuleContext
+from jedi.evaluate.context.klass import ClassContext
+from jedi.evaluate.context.function import FunctionContext, FunctionExecutionContext
+from jedi.evaluate.context.instance import AnonymousInstance, BoundMethod, \
+    CompiledInstance, AbstractInstanceContext, TreeInstance
diff --git a/pythonFiles/release/jedi/evaluate/context/function.py b/pythonFiles/release/jedi/evaluate/context/function.py
new file mode 100644
index 000000000000..0dba9c91d707
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/context/function.py
@@ -0,0 +1,226 @@
+from parso.python import tree
+
+from jedi._compatibility import use_metaclass
+from jedi import debug
+from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass
+from jedi.evaluate import compiled
+from jedi.evaluate import recursion
+from jedi.evaluate import docstrings
+from jedi.evaluate import pep0484
+from jedi.evaluate import flow_analysis
+from jedi.evaluate import helpers
+from jedi.evaluate.arguments import AnonymousArguments
+from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \
+    ContextName, AbstractNameDefinition, ParamName
+from jedi.evaluate.base_context import ContextualizedNode, NO_CONTEXTS, \
+    ContextSet, TreeContext
+from jedi.evaluate.lazy_context import LazyKnownContexts, LazyKnownContext, \
+    LazyTreeContext
+from jedi.evaluate.context import iterable
+from jedi import parser_utils
+from jedi.evaluate.parser_cache import get_yield_exprs
+
+
+class LambdaName(AbstractNameDefinition):
+    string_name = '<lambda>'
+
+    def __init__(self, lambda_context):
+        self._lambda_context = lambda_context
+        self.parent_context = lambda_context.parent_context
+
+    def start_pos(self):
+        return self._lambda_context.tree_node.start_pos
+
+    def infer(self):
+        return ContextSet(self._lambda_context)
+
+
+class FunctionContext(use_metaclass(CachedMetaClass, TreeContext)):
+    """
+    Needed because of decorators. Decorators are evaluated here.
+    """
+    api_type = 'function'
+
+    def __init__(self, evaluator, parent_context, funcdef):
+        """ This should not be called directly """
+        super(FunctionContext, self).__init__(evaluator, parent_context)
+        self.tree_node = funcdef
+
+    def get_filters(self, search_global, until_position=None, origin_scope=None):
+        if search_global:
+            yield ParserTreeFilter(
+                self.evaluator,
+                context=self,
+                until_position=until_position,
+                origin_scope=origin_scope
+            )
+        else:
+            scope = self.py__class__()
+            for filter in scope.get_filters(search_global=False, origin_scope=origin_scope):
+                yield filter
+
+    def infer_function_execution(self, function_execution):
+        """
+        Created to be used by inheritance.
+ """ + yield_exprs = get_yield_exprs(self.evaluator, self.tree_node) + if yield_exprs: + return ContextSet(iterable.Generator(self.evaluator, function_execution)) + else: + return function_execution.get_return_values() + + def get_function_execution(self, arguments=None): + if arguments is None: + arguments = AnonymousArguments() + + return FunctionExecutionContext(self.evaluator, self.parent_context, self, arguments) + + def py__call__(self, arguments): + function_execution = self.get_function_execution(arguments) + return self.infer_function_execution(function_execution) + + def py__class__(self): + # This differentiation is only necessary for Python2. Python3 does not + # use a different method class. + if isinstance(parser_utils.get_parent_scope(self.tree_node), tree.Class): + name = 'METHOD_CLASS' + else: + name = 'FUNCTION_CLASS' + return compiled.get_special_object(self.evaluator, name) + + @property + def name(self): + if self.tree_node.type == 'lambdef': + return LambdaName(self) + return ContextName(self, self.tree_node.name) + + def get_param_names(self): + function_execution = self.get_function_execution() + return [ParamName(function_execution, param.name) + for param in self.tree_node.get_params()] + + +class FunctionExecutionContext(TreeContext): + """ + This class is used to evaluate functions and their returns. + + This is the most complicated class, because it contains the logic to + transfer parameters. It is even more complicated, because there may be + multiple calls to functions and recursion has to be avoided. But this is + responsibility of the decorators. + """ + function_execution_filter = FunctionExecutionFilter + + def __init__(self, evaluator, parent_context, function_context, var_args): + super(FunctionExecutionContext, self).__init__(evaluator, parent_context) + self.function_context = function_context + self.tree_node = function_context.tree_node + self.var_args = var_args + + @evaluator_method_cache(default=NO_CONTEXTS) + @recursion.execution_recursion_decorator() + def get_return_values(self, check_yields=False): + funcdef = self.tree_node + if funcdef.type == 'lambdef': + return self.evaluator.eval_element(self, funcdef.children[-1]) + + if check_yields: + context_set = NO_CONTEXTS + returns = get_yield_exprs(self.evaluator, funcdef) + else: + returns = funcdef.iter_return_stmts() + context_set = docstrings.infer_return_types(self.function_context) + context_set |= pep0484.infer_return_types(self.function_context) + + for r in returns: + check = flow_analysis.reachability_check(self, funcdef, r) + if check is flow_analysis.UNREACHABLE: + debug.dbg('Return unreachable: %s', r) + else: + if check_yields: + context_set |= ContextSet.from_sets( + lazy_context.infer() + for lazy_context in self._eval_yield(r) + ) + else: + try: + children = r.children + except AttributeError: + context_set |= ContextSet(compiled.create(self.evaluator, None)) + else: + context_set |= self.eval_node(children[1]) + if check is flow_analysis.REACHABLE: + debug.dbg('Return reachable: %s', r) + break + return context_set + + def _eval_yield(self, yield_expr): + if yield_expr.type == 'keyword': + # `yield` just yields None. + yield LazyKnownContext(compiled.create(self.evaluator, None)) + return + + node = yield_expr.children[1] + if node.type == 'yield_arg': # It must be a yield from. 
+ cn = ContextualizedNode(self, node.children[1]) + for lazy_context in cn.infer().iterate(cn): + yield lazy_context + else: + yield LazyTreeContext(self, node) + + @recursion.execution_recursion_decorator(default=iter([])) + def get_yield_values(self): + for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef', + 'while_stmt', 'if_stmt')) + for y in get_yield_exprs(self.evaluator, self.tree_node)] + + # Calculate if the yields are placed within the same for loop. + yields_order = [] + last_for_stmt = None + for yield_, for_stmt in for_parents: + # For really simple for loops we can predict the order. Otherwise + # we just ignore it. + parent = for_stmt.parent + if parent.type == 'suite': + parent = parent.parent + if for_stmt.type == 'for_stmt' and parent == self.tree_node \ + and parser_utils.for_stmt_defines_one_name(for_stmt): # Simplicity for now. + if for_stmt == last_for_stmt: + yields_order[-1][1].append(yield_) + else: + yields_order.append((for_stmt, [yield_])) + elif for_stmt == self.tree_node: + yields_order.append((None, [yield_])) + else: + types = self.get_return_values(check_yields=True) + if types: + yield LazyKnownContexts(types) + return + last_for_stmt = for_stmt + + for for_stmt, yields in yields_order: + if for_stmt is None: + # No for_stmt, just normal yields. + for yield_ in yields: + for result in self._eval_yield(yield_): + yield result + else: + input_node = for_stmt.get_testlist() + cn = ContextualizedNode(self, input_node) + ordered = cn.infer().iterate(cn) + ordered = list(ordered) + for lazy_context in ordered: + dct = {str(for_stmt.children[1].value): lazy_context.infer()} + with helpers.predefine_names(self, for_stmt, dct): + for yield_in_same_for_stmt in yields: + for result in self._eval_yield(yield_in_same_for_stmt): + yield result + + def get_filters(self, search_global, until_position=None, origin_scope=None): + yield self.function_execution_filter(self.evaluator, self, + until_position=until_position, + origin_scope=origin_scope) + + @evaluator_method_cache() + def get_params(self): + return self.var_args.get_params(self) diff --git a/pythonFiles/release/jedi/evaluate/context/instance.py b/pythonFiles/release/jedi/evaluate/context/instance.py new file mode 100644 index 000000000000..2c8d796c9c6d --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/context/instance.py @@ -0,0 +1,435 @@ +from abc import abstractproperty + +from jedi._compatibility import is_py3 +from jedi import debug +from jedi.evaluate import compiled +from jedi.evaluate import filters +from jedi.evaluate.base_context import Context, NO_CONTEXTS, ContextSet, \ + iterator_to_context_set +from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.arguments import AbstractArguments, AnonymousArguments +from jedi.cache import memoize_method +from jedi.evaluate.context.function import FunctionExecutionContext, FunctionContext +from jedi.evaluate.context.klass import ClassContext, apply_py__get__ +from jedi.evaluate.context import iterable +from jedi.parser_utils import get_parent_scope + + + +class InstanceFunctionExecution(FunctionExecutionContext): + def __init__(self, instance, parent_context, function_context, var_args): + self.instance = instance + var_args = InstanceVarArgs(self, var_args) + + super(InstanceFunctionExecution, self).__init__( + instance.evaluator, parent_context, function_context, var_args) + + +class AnonymousInstanceFunctionExecution(FunctionExecutionContext): + 
function_execution_filter = filters.AnonymousInstanceFunctionExecutionFilter
+
+    def __init__(self, instance, parent_context, function_context, var_args):
+        self.instance = instance
+        super(AnonymousInstanceFunctionExecution, self).__init__(
+            instance.evaluator, parent_context, function_context, var_args)
+
+
+class AbstractInstanceContext(Context):
+    """
+    This class is used to evaluate instances.
+    """
+    api_type = 'instance'
+    function_execution_cls = InstanceFunctionExecution
+
+    def __init__(self, evaluator, parent_context, class_context, var_args):
+        super(AbstractInstanceContext, self).__init__(evaluator, parent_context)
+        # Generated instances are classes that are just generated by self
+        # (no var_args used).
+        self.class_context = class_context
+        self.var_args = var_args
+
+    def is_class(self):
+        return False
+
+    @property
+    def py__call__(self):
+        names = self.get_function_slot_names('__call__')
+        if not names:
+            # Means the Instance is not callable.
+            raise AttributeError
+
+        def execute(arguments):
+            return ContextSet.from_sets(name.execute(arguments) for name in names)
+
+        return execute
+
+    def py__class__(self):
+        return self.class_context
+
+    def py__bool__(self):
+        # Signal that we don't know about the bool type.
+        return None
+
+    def get_function_slot_names(self, name):
+        # Python classes don't look at the dictionary of the instance when
+        # looking up `__call__`. This is something that has to do with Python's
+        # internal slot system (note: not __slots__, but C slots).
+        for filter in self.get_filters(include_self_names=False):
+            names = filter.get(name)
+            if names:
+                return names
+        return []
+
+    def execute_function_slots(self, names, *evaluated_args):
+        return ContextSet.from_sets(
+            name.execute_evaluated(*evaluated_args)
+            for name in names
+        )
+
+    def py__get__(self, obj):
+        # Arguments in __get__ descriptors are obj, class.
+        # `method` is the new parent of the array, don't know if that's good.
+        names = self.get_function_slot_names('__get__')
+        if names:
+            if isinstance(obj, AbstractInstanceContext):
+                return self.execute_function_slots(names, obj, obj.class_context)
+            else:
+                none_obj = compiled.create(self.evaluator, None)
+                return self.execute_function_slots(names, none_obj, obj)
+        else:
+            return ContextSet(self)
+
+    def get_filters(self, search_global=None, until_position=None,
+                    origin_scope=None, include_self_names=True):
+        if include_self_names:
+            for cls in self.class_context.py__mro__():
+                if isinstance(cls, compiled.CompiledObject):
+                    if cls.tree_node is not None:
+                        # In this case we're talking about a fake object, it
+                        # doesn't make sense for normal compiled objects to
+                        # search for self variables.
+                        yield SelfNameFilter(self.evaluator, self, cls, origin_scope)
+                else:
+                    yield SelfNameFilter(self.evaluator, self, cls, origin_scope)
+
+        for cls in self.class_context.py__mro__():
+            if isinstance(cls, compiled.CompiledObject):
+                yield CompiledInstanceClassFilter(self.evaluator, self, cls)
+            else:
+                yield InstanceClassFilter(self.evaluator, self, cls, origin_scope)
+
+    def py__getitem__(self, index):
+        try:
+            names = self.get_function_slot_names('__getitem__')
+        except KeyError:
+            debug.warning('No __getitem__, cannot access the array.')
+            return NO_CONTEXTS
+        else:
+            index_obj = compiled.create(self.evaluator, index)
+            return self.execute_function_slots(names, index_obj)
+
+    def py__iter__(self):
+        iter_slot_names = self.get_function_slot_names('__iter__')
+        if not iter_slot_names:
+            debug.warning('No __iter__ on %s.'
% self) + return + + for generator in self.execute_function_slots(iter_slot_names): + if isinstance(generator, AbstractInstanceContext): + # `__next__` logic. + name = '__next__' if is_py3 else 'next' + iter_slot_names = generator.get_function_slot_names(name) + if iter_slot_names: + yield LazyKnownContexts( + generator.execute_function_slots(iter_slot_names) + ) + else: + debug.warning('Instance has no __next__ function in %s.', generator) + else: + for lazy_context in generator.py__iter__(): + yield lazy_context + + @abstractproperty + def name(self): + pass + + def _create_init_execution(self, class_context, func_node): + bound_method = BoundMethod( + self.evaluator, self, class_context, self.parent_context, func_node + ) + return self.function_execution_cls( + self, + class_context.parent_context, + bound_method, + self.var_args + ) + + def create_init_executions(self): + for name in self.get_function_slot_names('__init__'): + if isinstance(name, LazyInstanceName): + yield self._create_init_execution(name.class_context, name.tree_name.parent) + + @evaluator_method_cache() + def create_instance_context(self, class_context, node): + if node.parent.type in ('funcdef', 'classdef'): + node = node.parent + scope = get_parent_scope(node) + if scope == class_context.tree_node: + return class_context + else: + parent_context = self.create_instance_context(class_context, scope) + if scope.type == 'funcdef': + if scope.name.value == '__init__' and parent_context == class_context: + return self._create_init_execution(class_context, scope) + else: + bound_method = BoundMethod( + self.evaluator, self, class_context, + parent_context, scope + ) + return bound_method.get_function_execution() + elif scope.type == 'classdef': + class_context = ClassContext(self.evaluator, scope, parent_context) + return class_context + elif scope.type == 'comp_for': + # Comprehensions currently don't have a special scope in Jedi. + return self.create_instance_context(class_context, scope) + else: + raise NotImplementedError + return class_context + + def __repr__(self): + return "<%s of %s(%s)>" % (self.__class__.__name__, self.class_context, + self.var_args) + + +class CompiledInstance(AbstractInstanceContext): + def __init__(self, *args, **kwargs): + super(CompiledInstance, self).__init__(*args, **kwargs) + # I don't think that dynamic append lookups should happen here. That + # sounds more like something that should go to py__iter__. + if self.class_context.name.string_name in ['list', 'set'] \ + and self.parent_context.get_root_context() == self.evaluator.BUILTINS: + # compare the module path with the builtin name. 
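+            # (Illustrative note) get_dynamic_array_instance scans the
+            # module for calls like `arr.append(...)` / `arr.insert(...)` so
+            # later additions show up in the inferred contents; see the
+            # iterable module docstring below.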
+ self.var_args = iterable.get_dynamic_array_instance(self) + + @property + def name(self): + return compiled.CompiledContextName(self, self.class_context.name.string_name) + + def create_instance_context(self, class_context, node): + if get_parent_scope(node).type == 'classdef': + return class_context + else: + return super(CompiledInstance, self).create_instance_context(class_context, node) + + +class TreeInstance(AbstractInstanceContext): + def __init__(self, evaluator, parent_context, class_context, var_args): + super(TreeInstance, self).__init__(evaluator, parent_context, + class_context, var_args) + self.tree_node = class_context.tree_node + + @property + def name(self): + return filters.ContextName(self, self.class_context.name.tree_name) + + +class AnonymousInstance(TreeInstance): + function_execution_cls = AnonymousInstanceFunctionExecution + + def __init__(self, evaluator, parent_context, class_context): + super(AnonymousInstance, self).__init__( + evaluator, + parent_context, + class_context, + var_args=AnonymousArguments(), + ) + + +class CompiledInstanceName(compiled.CompiledName): + def __init__(self, evaluator, instance, parent_context, name): + super(CompiledInstanceName, self).__init__(evaluator, parent_context, name) + self._instance = instance + + @iterator_to_context_set + def infer(self): + for result_context in super(CompiledInstanceName, self).infer(): + if isinstance(result_context, FunctionContext): + parent_context = result_context.parent_context + while parent_context.is_class(): + parent_context = parent_context.parent_context + + yield BoundMethod( + result_context.evaluator, self._instance, self.parent_context, + parent_context, result_context.tree_node + ) + else: + if result_context.api_type == 'function': + yield CompiledBoundMethod(result_context) + else: + yield result_context + + +class CompiledInstanceClassFilter(compiled.CompiledObjectFilter): + name_class = CompiledInstanceName + + def __init__(self, evaluator, instance, compiled_object): + super(CompiledInstanceClassFilter, self).__init__( + evaluator, + compiled_object, + is_instance=True, + ) + self._instance = instance + + def _create_name(self, name): + return self.name_class( + self._evaluator, self._instance, self._compiled_object, name) + + +class BoundMethod(FunctionContext): + def __init__(self, evaluator, instance, class_context, *args, **kwargs): + super(BoundMethod, self).__init__(evaluator, *args, **kwargs) + self._instance = instance + self._class_context = class_context + + def get_function_execution(self, arguments=None): + if arguments is None: + arguments = AnonymousArguments() + return AnonymousInstanceFunctionExecution( + self._instance, self.parent_context, self, arguments) + else: + return InstanceFunctionExecution( + self._instance, self.parent_context, self, arguments) + + +class CompiledBoundMethod(compiled.CompiledObject): + def __init__(self, func): + super(CompiledBoundMethod, self).__init__( + func.evaluator, func.obj, func.parent_context, func.tree_node) + + def get_param_names(self): + return list(super(CompiledBoundMethod, self).get_param_names())[1:] + + +class InstanceNameDefinition(filters.TreeNameDefinition): + def infer(self): + return super(InstanceNameDefinition, self).infer() + + +class LazyInstanceName(filters.TreeNameDefinition): + """ + This name calculates the parent_context lazily. 
+ """ + def __init__(self, instance, class_context, tree_name): + self._instance = instance + self.class_context = class_context + self.tree_name = tree_name + + @property + def parent_context(self): + return self._instance.create_instance_context(self.class_context, self.tree_name) + + +class LazyInstanceClassName(LazyInstanceName): + @iterator_to_context_set + def infer(self): + for result_context in super(LazyInstanceClassName, self).infer(): + if isinstance(result_context, FunctionContext): + # Classes are never used to resolve anything within the + # functions. Only other functions and modules will resolve + # those things. + parent_context = result_context.parent_context + while parent_context.is_class(): + parent_context = parent_context.parent_context + + yield BoundMethod( + result_context.evaluator, self._instance, self.class_context, + parent_context, result_context.tree_node + ) + else: + for c in apply_py__get__(result_context, self._instance): + yield c + + +class InstanceClassFilter(filters.ParserTreeFilter): + name_class = LazyInstanceClassName + + def __init__(self, evaluator, context, class_context, origin_scope): + super(InstanceClassFilter, self).__init__( + evaluator=evaluator, + context=context, + node_context=class_context, + origin_scope=origin_scope + ) + self._class_context = class_context + + def _equals_origin_scope(self): + node = self._origin_scope + while node is not None: + if node == self._parser_scope or node == self.context: + return True + node = get_parent_scope(node) + return False + + def _access_possible(self, name): + return not name.value.startswith('__') or name.value.endswith('__') \ + or self._equals_origin_scope() + + def _filter(self, names): + names = super(InstanceClassFilter, self)._filter(names) + return [name for name in names if self._access_possible(name)] + + def _convert_names(self, names): + return [self.name_class(self.context, self._class_context, name) for name in names] + + +class SelfNameFilter(InstanceClassFilter): + name_class = LazyInstanceName + + def _filter(self, names): + names = self._filter_self_names(names) + if isinstance(self._parser_scope, compiled.CompiledObject) and False: + # This would be for builtin skeletons, which are not yet supported. 
+            return list(names)
+        else:
+            start, end = self._parser_scope.start_pos, self._parser_scope.end_pos
+            return [n for n in names if start < n.start_pos < end]
+
+    def _filter_self_names(self, names):
+        for name in names:
+            trailer = name.parent
+            if trailer.type == 'trailer' \
+                    and len(trailer.children) == 2 \
+                    and trailer.children[0] == '.':
+                if name.is_definition() and self._access_possible(name):
+                    yield name
+
+    def _check_flows(self, names):
+        return names
+
+
+class InstanceVarArgs(AbstractArguments):
+    def __init__(self, execution_context, var_args):
+        self._execution_context = execution_context
+        self._var_args = var_args
+
+    @memoize_method
+    def _get_var_args(self):
+        return self._var_args
+
+    @property
+    def argument_node(self):
+        return self._var_args.argument_node
+
+    @property
+    def trailer(self):
+        return self._var_args.trailer
+
+    def unpack(self, func=None):
+        yield None, LazyKnownContext(self._execution_context.instance)
+        for values in self._get_var_args().unpack(func):
+            yield values
+
+    def get_calling_nodes(self):
+        return self._get_var_args().get_calling_nodes()
diff --git a/pythonFiles/release/jedi/evaluate/context/iterable.py b/pythonFiles/release/jedi/evaluate/context/iterable.py
new file mode 100644
index 000000000000..d0f468e43425
--- /dev/null
+++ b/pythonFiles/release/jedi/evaluate/context/iterable.py
@@ -0,0 +1,691 @@
+"""
+Contains all classes and functions to deal with lists, dicts, generators and
+iterators in general.
+
+Array modifications
+*******************
+
+If the content of an array (``set``/``list``) is requested somewhere, the
+current module will be checked for appearances of ``arr.append``,
+``arr.insert``, etc. If the ``arr`` name points to an actual array, the
+content will be added.
+
+This can be really CPU intensive, as you can imagine. Because |jedi| has to
+follow **every** ``append`` and check whether it's the right array. However,
+this works pretty well, because in *slow* cases, the recursion detector and
+other settings will stop this process.
+
+It is important to note that:
+
+1. Array modifications work only in the current module.
+2. Jedi only checks Array additions; ``list.pop``, etc. are ignored.
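+
+For example (assuming ``settings.dynamic_array_additions`` is enabled)::
+
+    arr = ['']
+    arr.append(1)
+    arr[0]  # |jedi| infers both str and int here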
+""" +from jedi import debug +from jedi import settings +from jedi.evaluate import compiled +from jedi.evaluate import analysis +from jedi.evaluate import recursion +from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \ + LazyTreeContext +from jedi.evaluate.helpers import is_string, predefine_names, evaluate_call_of_leaf +from jedi.evaluate.utils import safe_property +from jedi.evaluate.utils import to_list +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.filters import ParserTreeFilter, has_builtin_methods, \ + register_builtin_method, SpecialMethodFilter +from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, Context, \ + TreeContext, ContextualizedNode +from jedi.parser_utils import get_comp_fors + + +class AbstractIterable(Context): + builtin_methods = {} + api_type = 'instance' + + def __init__(self, evaluator): + super(AbstractIterable, self).__init__(evaluator, evaluator.BUILTINS) + + def get_filters(self, search_global, until_position=None, origin_scope=None): + raise NotImplementedError + + @property + def name(self): + return compiled.CompiledContextName(self, self.array_type) + + +@has_builtin_methods +class GeneratorMixin(object): + array_type = None + + @register_builtin_method('send') + @register_builtin_method('next', python_version_match=2) + @register_builtin_method('__next__', python_version_match=3) + def py__next__(self): + # TODO add TypeError if params are given. + return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__()) + + def get_filters(self, search_global, until_position=None, origin_scope=None): + gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT') + yield SpecialMethodFilter(self, self.builtin_methods, gen_obj) + for filter in gen_obj.get_filters(search_global): + yield filter + + def py__bool__(self): + return True + + def py__class__(self): + gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT') + return gen_obj.py__class__() + + @property + def name(self): + return compiled.CompiledContextName(self, 'generator') + + +class Generator(GeneratorMixin, Context): + """Handling of `yield` functions.""" + def __init__(self, evaluator, func_execution_context): + super(Generator, self).__init__(evaluator, parent_context=evaluator.BUILTINS) + self._func_execution_context = func_execution_context + + def py__iter__(self): + return self._func_execution_context.get_yield_values() + + def __repr__(self): + return "<%s of %s>" % (type(self).__name__, self._func_execution_context) + + +class CompForContext(TreeContext): + @classmethod + def from_comp_for(cls, parent_context, comp_for): + return cls(parent_context.evaluator, parent_context, comp_for) + + def __init__(self, evaluator, parent_context, comp_for): + super(CompForContext, self).__init__(evaluator, parent_context) + self.tree_node = comp_for + + def get_node(self): + return self.tree_node + + def get_filters(self, search_global, until_position=None, origin_scope=None): + yield ParserTreeFilter(self.evaluator, self) + + +class Comprehension(AbstractIterable): + @staticmethod + def from_atom(evaluator, context, atom): + bracket = atom.children[0] + if bracket == '{': + if atom.children[1].children[1] == ':': + cls = DictComprehension + else: + cls = SetComprehension + elif bracket == '(': + cls = GeneratorComprehension + elif bracket == '[': + cls = ListComprehension + return cls(evaluator, context, atom) + + def __init__(self, evaluator, defining_context, atom): + super(Comprehension, 
self).__init__(evaluator) + self._defining_context = defining_context + self._atom = atom + + def _get_comprehension(self): + # The atom contains a testlist_comp + return self._atom.children[1] + + def _get_comp_for(self): + # The atom contains a testlist_comp + return self._get_comprehension().children[1] + + def _eval_node(self, index=0): + """ + The first part `x + 1` of the list comprehension: + + [x + 1 for x in foo] + """ + return self._get_comprehension().children[index] + + @evaluator_method_cache() + def _get_comp_for_context(self, parent_context, comp_for): + # TODO shouldn't this be part of create_context? + return CompForContext.from_comp_for(parent_context, comp_for) + + def _nested(self, comp_fors, parent_context=None): + comp_for = comp_fors[0] + input_node = comp_for.children[3] + parent_context = parent_context or self._defining_context + input_types = parent_context.eval_node(input_node) + + cn = ContextualizedNode(parent_context, input_node) + iterated = input_types.iterate(cn) + exprlist = comp_for.children[1] + for i, lazy_context in enumerate(iterated): + types = lazy_context.infer() + dct = unpack_tuple_to_dict(parent_context, types, exprlist) + context_ = self._get_comp_for_context( + parent_context, + comp_for, + ) + with predefine_names(context_, comp_for, dct): + try: + for result in self._nested(comp_fors[1:], context_): + yield result + except IndexError: + iterated = context_.eval_node(self._eval_node()) + if self.array_type == 'dict': + yield iterated, context_.eval_node(self._eval_node(2)) + else: + yield iterated + + @evaluator_method_cache(default=[]) + @to_list + def _iterate(self): + comp_fors = tuple(get_comp_fors(self._get_comp_for())) + for result in self._nested(comp_fors): + yield result + + def py__iter__(self): + for set_ in self._iterate(): + yield LazyKnownContexts(set_) + + def __repr__(self): + return "<%s of %s>" % (type(self).__name__, self._atom) + + +class ArrayMixin(object): + def get_filters(self, search_global, until_position=None, origin_scope=None): + # `array.type` is a string with the type, e.g. 'list'. + compiled_obj = compiled.builtin_from_name(self.evaluator, self.array_type) + yield SpecialMethodFilter(self, self.builtin_methods, compiled_obj) + for typ in compiled_obj.execute_evaluated(self): + for filter in typ.get_filters(): + yield filter + + def py__bool__(self): + return None # We don't know the length, because of appends. 
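+        # None (rather than False) follows the tri-state py__bool__
+        # convention; see the magic-method table in context/klass.py.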
+
+    def py__class__(self):
+        return compiled.builtin_from_name(self.evaluator, self.array_type)
+
+    @safe_property
+    def parent(self):
+        return self.evaluator.BUILTINS
+
+    def dict_values(self):
+        return ContextSet.from_sets(
+            self._defining_context.eval_node(v)
+            for k, v in self._items()
+        )
+
+
+class ListComprehension(ArrayMixin, Comprehension):
+    array_type = 'list'
+
+    def py__getitem__(self, index):
+        if isinstance(index, slice):
+            return ContextSet(self)
+
+        all_types = list(self.py__iter__())
+        return all_types[index].infer()
+
+
+class SetComprehension(ArrayMixin, Comprehension):
+    array_type = 'set'
+
+
+@has_builtin_methods
+class DictComprehension(ArrayMixin, Comprehension):
+    array_type = 'dict'
+
+    def _get_comp_for(self):
+        return self._get_comprehension().children[3]
+
+    def py__iter__(self):
+        for keys, values in self._iterate():
+            yield LazyKnownContexts(keys)
+
+    def py__getitem__(self, index):
+        for keys, values in self._iterate():
+            for k in keys:
+                if isinstance(k, compiled.CompiledObject):
+                    if k.obj == index:
+                        return values
+        return self.dict_values()
+
+    def dict_values(self):
+        return ContextSet.from_sets(values for keys, values in self._iterate())
+
+    @register_builtin_method('values')
+    def _imitate_values(self):
+        lazy_context = LazyKnownContexts(self.dict_values())
+        return ContextSet(FakeSequence(self.evaluator, 'list', [lazy_context]))
+
+    @register_builtin_method('items')
+    def _imitate_items(self):
+        # Build a fake list of (key, value) tuples, analogous to
+        # DictLiteralContext._imitate_items below.
+        lazy_contexts = [
+            LazyKnownContext(FakeSequence(
+                self.evaluator, 'tuple',
+                (LazyKnownContexts(keys), LazyKnownContexts(values))
+            )) for keys, values in self._iterate()
+        ]
+
+        return ContextSet(FakeSequence(self.evaluator, 'list', lazy_contexts))
+
+
+class GeneratorComprehension(GeneratorMixin, Comprehension):
+    pass
+
+
+class SequenceLiteralContext(ArrayMixin, AbstractIterable):
+    mapping = {'(': 'tuple',
+               '[': 'list',
+               '{': 'set'}
+
+    def __init__(self, evaluator, defining_context, atom):
+        super(SequenceLiteralContext, self).__init__(evaluator)
+        self.atom = atom
+        self._defining_context = defining_context
+
+        if self.atom.type in ('testlist_star_expr', 'testlist'):
+            self.array_type = 'tuple'
+        else:
+            self.array_type = SequenceLiteralContext.mapping[atom.children[0]]
+            """The builtin name of the array (list, set, tuple or dict)."""
+
+    def py__getitem__(self, index):
+        """Here the index is an int/str. Raises IndexError/KeyError."""
+        if self.array_type == 'dict':
+            for key, value in self._items():
+                for k in self._defining_context.eval_node(key):
+                    if isinstance(k, compiled.CompiledObject) \
+                            and index == k.obj:
+                        return self._defining_context.eval_node(value)
+            raise KeyError('No key found in dictionary %s.' % self)
+
+        # Can raise an IndexError
+        if isinstance(index, slice):
+            return ContextSet(self)
+        else:
+            return self._defining_context.eval_node(self._items()[index])
+
+    def py__iter__(self):
+        """
+        While values returns the possible values for any array field, this
+        function returns the value for a certain index.
+        """
+        if self.array_type == 'dict':
+            # Get keys.
+            types = ContextSet()
+            for k, _ in self._items():
+                types |= self._defining_context.eval_node(k)
+            # We don't know which dict index comes first, therefore always
+            # yield all the types.
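+            # (e.g. for ``{1: 'a', 'b': 2}`` every key position yields the
+            # whole union {int, str})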
+ for _ in types: + yield LazyKnownContexts(types) + else: + for node in self._items(): + yield LazyTreeContext(self._defining_context, node) + + for addition in check_array_additions(self._defining_context, self): + yield addition + + def _values(self): + """Returns a list of a list of node.""" + if self.array_type == 'dict': + return ContextSet.from_sets(v for k, v in self._items()) + else: + return self._items() + + def _items(self): + c = self.atom.children + + if self.atom.type in ('testlist_star_expr', 'testlist'): + return c[::2] + + array_node = c[1] + if array_node in (']', '}', ')'): + return [] # Direct closing bracket, doesn't contain items. + + if array_node.type == 'testlist_comp': + return array_node.children[::2] + elif array_node.type == 'dictorsetmaker': + kv = [] + iterator = iter(array_node.children) + for key in iterator: + op = next(iterator, None) + if op is None or op == ',': + kv.append(key) # A set. + else: + assert op == ':' # A dict. + kv.append((key, next(iterator))) + next(iterator, None) # Possible comma. + return kv + else: + return [array_node] + + def exact_key_items(self): + """ + Returns a generator of tuples like dict.items(), where the key is + resolved (as a string) and the values are still lazy contexts. + """ + for key_node, value in self._items(): + for key in self._defining_context.eval_node(key_node): + if is_string(key): + yield key.obj, LazyTreeContext(self._defining_context, value) + + def __repr__(self): + return "<%s of %s>" % (self.__class__.__name__, self.atom) + + +@has_builtin_methods +class DictLiteralContext(SequenceLiteralContext): + array_type = 'dict' + + def __init__(self, evaluator, defining_context, atom): + super(SequenceLiteralContext, self).__init__(evaluator) + self._defining_context = defining_context + self.atom = atom + + @register_builtin_method('values') + def _imitate_values(self): + lazy_context = LazyKnownContexts(self.dict_values()) + return ContextSet(FakeSequence(self.evaluator, 'list', [lazy_context])) + + @register_builtin_method('items') + def _imitate_items(self): + lazy_contexts = [ + LazyKnownContext(FakeSequence( + self.evaluator, 'tuple', + (LazyTreeContext(self._defining_context, key_node), + LazyTreeContext(self._defining_context, value_node)) + )) for key_node, value_node in self._items() + ] + + return ContextSet(FakeSequence(self.evaluator, 'list', lazy_contexts)) + + +class _FakeArray(SequenceLiteralContext): + def __init__(self, evaluator, container, type): + super(SequenceLiteralContext, self).__init__(evaluator) + self.array_type = type + self.atom = container + # TODO is this class really needed? 
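+
+# The fake containers below stand in for sequences that have no node in the
+# parse tree, e.g. the list of (key, value) tuples that
+# DictLiteralContext._imitate_items synthesizes above.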
+ + +class FakeSequence(_FakeArray): + def __init__(self, evaluator, array_type, lazy_context_list): + """ + type should be one of "tuple", "list" + """ + super(FakeSequence, self).__init__(evaluator, None, array_type) + self._lazy_context_list = lazy_context_list + + def py__getitem__(self, index): + return self._lazy_context_list[index].infer() + + def py__iter__(self): + return self._lazy_context_list + + def py__bool__(self): + return bool(len(self._lazy_context_list)) + + def __repr__(self): + return "<%s of %s>" % (type(self).__name__, self._lazy_context_list) + + +class FakeDict(_FakeArray): + def __init__(self, evaluator, dct): + super(FakeDict, self).__init__(evaluator, dct, 'dict') + self._dct = dct + + def py__iter__(self): + for key in self._dct: + yield LazyKnownContext(compiled.create(self.evaluator, key)) + + def py__getitem__(self, index): + return self._dct[index].infer() + + def dict_values(self): + return ContextSet.from_sets(lazy_context.infer() for lazy_context in self._dct.values()) + + def exact_key_items(self): + return self._dct.items() + + +class MergedArray(_FakeArray): + def __init__(self, evaluator, arrays): + super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].array_type) + self._arrays = arrays + + def py__iter__(self): + for array in self._arrays: + for lazy_context in array.py__iter__(): + yield lazy_context + + def py__getitem__(self, index): + return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__()) + + def _items(self): + for array in self._arrays: + for a in array._items(): + yield a + + def __len__(self): + return sum(len(a) for a in self._arrays) + + +def unpack_tuple_to_dict(context, types, exprlist): + """ + Unpacking tuple assignments in for statements and expr_stmts. + """ + if exprlist.type == 'name': + return {exprlist.value: types} + elif exprlist.type == 'atom' and exprlist.children[0] in '([': + return unpack_tuple_to_dict(context, types, exprlist.children[1]) + elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist', + 'testlist_star_expr'): + dct = {} + parts = iter(exprlist.children[::2]) + n = 0 + for lazy_context in types.iterate(exprlist): + n += 1 + try: + part = next(parts) + except StopIteration: + # TODO this context is probably not right. + analysis.add(context, 'value-error-too-many-values', part, + message="ValueError: too many values to unpack (expected %s)" % n) + else: + dct.update(unpack_tuple_to_dict(context, lazy_context.infer(), part)) + has_parts = next(parts, None) + if types and has_parts is not None: + # TODO this context is probably not right. + analysis.add(context, 'value-error-too-few-values', has_parts, + message="ValueError: need more than %s values to unpack" % n) + return dct + elif exprlist.type == 'power' or exprlist.type == 'atom_expr': + # Something like ``arr[x], var = ...``. + # This is something that is not yet supported, would also be difficult + # to write into a dict. + return {} + elif exprlist.type == 'star_expr': # `a, *b, c = x` type unpackings + # Currently we're not supporting them. 
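+        # (Supporting it would mean binding ``b`` in ``a, *b, c = 1, 2, 3, 4``
+        # to the list of the middle values, here ``[2, 3]``.)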
+ return {} + raise NotImplementedError + + +def check_array_additions(context, sequence): + """ Just a mapper function for the internal _check_array_additions """ + if sequence.array_type not in ('list', 'set'): + # TODO also check for dict updates + return NO_CONTEXTS + + return _check_array_additions(context, sequence) + + +@evaluator_method_cache(default=NO_CONTEXTS) +@debug.increase_indent +def _check_array_additions(context, sequence): + """ + Checks if a `Array` has "add" (append, insert, extend) statements: + + >>> a = [""] + >>> a.append(1) + """ + from jedi.evaluate import arguments + + debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA') + module_context = context.get_root_context() + if not settings.dynamic_array_additions or isinstance(module_context, compiled.CompiledObject): + debug.dbg('Dynamic array search aborted.', color='MAGENTA') + return ContextSet() + + def find_additions(context, arglist, add_name): + params = list(arguments.TreeArguments(context.evaluator, context, arglist).unpack()) + result = set() + if add_name in ['insert']: + params = params[1:] + if add_name in ['append', 'add', 'insert']: + for key, whatever in params: + result.add(whatever) + elif add_name in ['extend', 'update']: + for key, lazy_context in params: + result |= set(lazy_context.infer().iterate()) + return result + + temp_param_add, settings.dynamic_params_for_other_modules = \ + settings.dynamic_params_for_other_modules, False + + is_list = sequence.name.string_name == 'list' + search_names = (['append', 'extend', 'insert'] if is_list else ['add', 'update']) + + added_types = set() + for add_name in search_names: + try: + possible_names = module_context.tree_node.get_used_names()[add_name] + except KeyError: + continue + else: + for name in possible_names: + context_node = context.tree_node + if not (context_node.start_pos < name.start_pos < context_node.end_pos): + continue + trailer = name.parent + power = trailer.parent + trailer_pos = power.children.index(trailer) + try: + execution_trailer = power.children[trailer_pos + 1] + except IndexError: + continue + else: + if execution_trailer.type != 'trailer' \ + or execution_trailer.children[0] != '(' \ + or execution_trailer.children[1] == ')': + continue + + random_context = context.create_context(name) + + with recursion.execution_allowed(context.evaluator, power) as allowed: + if allowed: + found = evaluate_call_of_leaf( + random_context, + name, + cut_own_trailer=True + ) + if sequence in found: + # The arrays match. Now add the results + added_types |= find_additions( + random_context, + execution_trailer.children[1], + add_name + ) + + # reset settings + settings.dynamic_params_for_other_modules = temp_param_add + debug.dbg('Dynamic array result %s' % added_types, color='MAGENTA') + return added_types + + +def get_dynamic_array_instance(instance): + """Used for set() and list() instances.""" + if not settings.dynamic_array_additions: + return instance.var_args + + ai = _ArrayInstance(instance) + from jedi.evaluate import arguments + return arguments.ValuesArguments([ContextSet(ai)]) + + +class _ArrayInstance(object): + """ + Used for the usage of set() and list(). + This is definitely a hack, but a good one :-) + It makes it possible to use set/list conversions. 
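+    For example, ``set([1.0, 2.0])`` iterates the underlying list argument,
+    so the resulting instance yields ``float`` values.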
+ + In contrast to Array, ListComprehension and all other iterable types, this + is something that is only used inside `evaluate/compiled/fake/builtins.py` + and therefore doesn't need filters, `py__bool__` and so on, because + we don't use these operations in `builtins.py`. + """ + def __init__(self, instance): + self.instance = instance + self.var_args = instance.var_args + + def py__iter__(self): + var_args = self.var_args + try: + _, lazy_context = next(var_args.unpack()) + except StopIteration: + pass + else: + for lazy in lazy_context.infer().iterate(): + yield lazy + + from jedi.evaluate import arguments + if isinstance(var_args, arguments.TreeArguments): + additions = _check_array_additions(var_args.context, self.instance) + for addition in additions: + yield addition + + def iterate(self, contextualized_node=None): + return self.py__iter__() + + +class Slice(Context): + def __init__(self, context, start, stop, step): + super(Slice, self).__init__( + context.evaluator, + parent_context=context.evaluator.BUILTINS + ) + self._context = context + # all of them are either a Precedence or None. + self._start = start + self._stop = stop + self._step = step + + @property + def obj(self): + """ + Imitate CompiledObject.obj behavior and return a ``builtin.slice()`` + object. + """ + def get(element): + if element is None: + return None + + result = self._context.eval_node(element) + if len(result) != 1: + # For simplicity, we want slices to be clear defined with just + # one type. Otherwise we will return an empty slice object. + raise IndexError + try: + return list(result)[0].obj + except AttributeError: + return None + + try: + return slice(get(self._start), get(self._stop), get(self._step)) + except IndexError: + return slice(None, None, None) diff --git a/pythonFiles/release/jedi/evaluate/context/klass.py b/pythonFiles/release/jedi/evaluate/context/klass.py new file mode 100644 index 000000000000..b7d61d3e16bf --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/context/klass.py @@ -0,0 +1,197 @@ +""" +Like described in the :mod:`parso.python.tree` module, +there's a need for an ast like module to represent the states of parsed +modules. + +But now there are also structures in Python that need a little bit more than +that. An ``Instance`` for example is only a ``Class`` before it is +instantiated. This class represents these cases. + +So, why is there also a ``Class`` class here? Well, there are decorators and +they change classes in Python 3. + +Representation modules also define "magic methods". Those methods look like +``py__foo__`` and are typically mappable to the Python equivalents ``__call__`` +and others. Here's a list: + +====================================== ======================================== +**Method** **Description** +-------------------------------------- ---------------------------------------- +py__call__(params: Array) On callable objects, returns types. +py__bool__() Returns True/False/None; None means that + there's no certainty. +py__bases__() Returns a list of base classes. +py__mro__() Returns a list of classes (the mro). +py__iter__() Returns a generator of a set of types. +py__class__() Returns the class of an instance. +py__getitem__(index: int/str) Returns a a set of types of the index. + Can raise an IndexError/KeyError. +py__file__() Only on modules. Returns None if does + not exist. +py__package__() Only on modules. For the import system. +py__path__() Only on modules. For the import system. +py__get__(call_object) Only on instances. 
Simulates + descriptors. +py__doc__(include_call_signature: Returns the docstring for a context. + bool) +====================================== ======================================== + +""" +from jedi._compatibility import use_metaclass +from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass +from jedi.evaluate import compiled +from jedi.evaluate.lazy_context import LazyKnownContext +from jedi.evaluate.filters import ParserTreeFilter, TreeNameDefinition, \ + ContextName, AnonymousInstanceParamName +from jedi.evaluate.base_context import ContextSet, iterator_to_context_set, \ + TreeContext + + +def apply_py__get__(context, base_context): + try: + method = context.py__get__ + except AttributeError: + yield context + else: + for descriptor_context in method(base_context): + yield descriptor_context + + +class ClassName(TreeNameDefinition): + def __init__(self, parent_context, tree_name, name_context): + super(ClassName, self).__init__(parent_context, tree_name) + self._name_context = name_context + + @iterator_to_context_set + def infer(self): + # TODO this _name_to_types might get refactored and be a part of the + # parent class. Once it is, we can probably just overwrite method to + # achieve this. + from jedi.evaluate.syntax_tree import tree_name_to_contexts + inferred = tree_name_to_contexts( + self.parent_context.evaluator, self._name_context, self.tree_name) + + for result_context in inferred: + for c in apply_py__get__(result_context, self.parent_context): + yield c + + +class ClassFilter(ParserTreeFilter): + name_class = ClassName + + def _convert_names(self, names): + return [self.name_class(self.context, name, self._node_context) + for name in names] + + +class ClassContext(use_metaclass(CachedMetaClass, TreeContext)): + """ + This class is not only important to extend `tree.Class`, it is also a + important for descriptors (if the descriptor methods are evaluated or not). + """ + api_type = 'class' + + def __init__(self, evaluator, parent_context, classdef): + super(ClassContext, self).__init__(evaluator, parent_context=parent_context) + self.tree_node = classdef + + @evaluator_method_cache(default=()) + def py__mro__(self): + def add(cls): + if cls not in mro: + mro.append(cls) + + mro = [self] + # TODO Do a proper mro resolution. Currently we are just listing + # classes. However, it's a complicated algorithm. + for lazy_cls in self.py__bases__(): + # TODO there's multiple different mro paths possible if this yields + # multiple possibilities. Could be changed to be more correct. + for cls in lazy_cls.infer(): + # TODO detect for TypeError: duplicate base class str, + # e.g. 
`class X(str, str): pass` + try: + mro_method = cls.py__mro__ + except AttributeError: + # TODO add a TypeError like: + """ + >>> class Y(lambda: test): pass + Traceback (most recent call last): + File "", line 1, in + TypeError: function() argument 1 must be code, not str + >>> class Y(1): pass + Traceback (most recent call last): + File "", line 1, in + TypeError: int() takes at most 2 arguments (3 given) + """ + pass + else: + add(cls) + for cls_new in mro_method(): + add(cls_new) + return tuple(mro) + + @evaluator_method_cache(default=()) + def py__bases__(self): + arglist = self.tree_node.get_super_arglist() + if arglist: + from jedi.evaluate import arguments + args = arguments.TreeArguments(self.evaluator, self, arglist) + return [value for key, value in args.unpack() if key is None] + else: + return [LazyKnownContext(compiled.create(self.evaluator, object))] + + def py__call__(self, params): + from jedi.evaluate.context import TreeInstance + return ContextSet(TreeInstance(self.evaluator, self.parent_context, self, params)) + + def py__class__(self): + return compiled.create(self.evaluator, type) + + def get_params(self): + from jedi.evaluate.context import AnonymousInstance + anon = AnonymousInstance(self.evaluator, self.parent_context, self) + return [AnonymousInstanceParamName(anon, param.name) for param in self.funcdef.get_params()] + + def get_filters(self, search_global, until_position=None, origin_scope=None, is_instance=False): + if search_global: + yield ParserTreeFilter( + self.evaluator, + context=self, + until_position=until_position, + origin_scope=origin_scope + ) + else: + for cls in self.py__mro__(): + if isinstance(cls, compiled.CompiledObject): + for filter in cls.get_filters(is_instance=is_instance): + yield filter + else: + yield ClassFilter( + self.evaluator, self, node_context=cls, + origin_scope=origin_scope) + + def is_class(self): + return True + + def get_function_slot_names(self, name): + for filter in self.get_filters(search_global=False): + names = filter.get(name) + if names: + return names + return [] + + def get_param_names(self): + for name in self.get_function_slot_names('__init__'): + for context_ in name.infer(): + try: + method = context_.get_param_names + except AttributeError: + pass + else: + return list(method())[1:] + return [] + + @property + def name(self): + return ContextName(self, self.tree_node.name) diff --git a/pythonFiles/release/jedi/evaluate/context/module.py b/pythonFiles/release/jedi/evaluate/context/module.py new file mode 100644 index 000000000000..5ba92cdb1c3e --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/context/module.py @@ -0,0 +1,213 @@ +import pkgutil +import imp +import re +import os + +from parso import python_bytes_to_unicode + +from jedi._compatibility import use_metaclass +from jedi.evaluate.cache import CachedMetaClass, evaluator_method_cache +from jedi.evaluate.filters import GlobalNameFilter, ContextNameMixin, \ + AbstractNameDefinition, ParserTreeFilter, DictFilter +from jedi.evaluate import compiled +from jedi.evaluate.base_context import TreeContext +from jedi.evaluate.imports import SubModuleName, infer_import + + +class _ModuleAttributeName(AbstractNameDefinition): + """ + For module attributes like __file__, __str__ and so on. 
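+    They are not backed by tree nodes and simply infer to ``str`` instances.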
+ """ + api_type = 'instance' + + def __init__(self, parent_module, string_name): + self.parent_context = parent_module + self.string_name = string_name + + def infer(self): + return compiled.create(self.parent_context.evaluator, str).execute_evaluated() + + +class ModuleName(ContextNameMixin, AbstractNameDefinition): + start_pos = 1, 0 + + def __init__(self, context, name): + self._context = context + self._name = name + + @property + def string_name(self): + return self._name + + +class ModuleContext(use_metaclass(CachedMetaClass, TreeContext)): + api_type = 'module' + parent_context = None + + def __init__(self, evaluator, module_node, path): + super(ModuleContext, self).__init__(evaluator, parent_context=None) + self.tree_node = module_node + self._path = path + + def get_filters(self, search_global, until_position=None, origin_scope=None): + yield ParserTreeFilter( + self.evaluator, + context=self, + until_position=until_position, + origin_scope=origin_scope + ) + yield GlobalNameFilter(self, self.tree_node) + yield DictFilter(self._sub_modules_dict()) + yield DictFilter(self._module_attributes_dict()) + for star_module in self.star_imports(): + yield next(star_module.get_filters(search_global)) + + # I'm not sure if the star import cache is really that effective anymore + # with all the other really fast import caches. Recheck. Also we would need + # to push the star imports into Evaluator.modules, if we reenable this. + @evaluator_method_cache([]) + def star_imports(self): + modules = [] + for i in self.tree_node.iter_imports(): + if i.is_star_import(): + name = i.get_paths()[-1][-1] + new = infer_import(self, name) + for module in new: + if isinstance(module, ModuleContext): + modules += module.star_imports() + modules += new + return modules + + @evaluator_method_cache() + def _module_attributes_dict(self): + names = ['__file__', '__package__', '__doc__', '__name__'] + # All the additional module attributes are strings. + return dict((n, _ModuleAttributeName(self, n)) for n in names) + + @property + def _string_name(self): + """ This is used for the goto functions. """ + if self._path is None: + return '' # no path -> empty name + else: + sep = (re.escape(os.path.sep),) * 2 + r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self._path) + # Remove PEP 3149 names + return re.sub('\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1)) + + @property + @evaluator_method_cache() + def name(self): + return ModuleName(self, self._string_name) + + def _get_init_directory(self): + """ + :return: The path to the directory of a package. None in case it's not + a package. + """ + for suffix, _, _ in imp.get_suffixes(): + ending = '__init__' + suffix + py__file__ = self.py__file__() + if py__file__ is not None and py__file__.endswith(ending): + # Remove the ending, including the separator. + return self.py__file__()[:-len(ending) - 1] + return None + + def py__name__(self): + for name, module in self.evaluator.modules.items(): + if module == self and name != '': + return name + + return '__main__' + + def py__file__(self): + """ + In contrast to Python's __file__ can be None. 
+ """ + if self._path is None: + return None + + return os.path.abspath(self._path) + + def py__package__(self): + if self._get_init_directory() is None: + return re.sub(r'\.?[^\.]+$', '', self.py__name__()) + else: + return self.py__name__() + + def _py__path__(self): + search_path = self.evaluator.project.sys_path + init_path = self.py__file__() + if os.path.basename(init_path) == '__init__.py': + with open(init_path, 'rb') as f: + content = python_bytes_to_unicode(f.read(), errors='replace') + # these are strings that need to be used for namespace packages, + # the first one is ``pkgutil``, the second ``pkg_resources``. + options = ('declare_namespace(__name__)', 'extend_path(__path__') + if options[0] in content or options[1] in content: + # It is a namespace, now try to find the rest of the + # modules on sys_path or whatever the search_path is. + paths = set() + for s in search_path: + other = os.path.join(s, self.name.string_name) + if os.path.isdir(other): + paths.add(other) + if paths: + return list(paths) + # TODO I'm not sure if this is how nested namespace + # packages work. The tests are not really good enough to + # show that. + # Default to this. + return [self._get_init_directory()] + + @property + def py__path__(self): + """ + Not seen here, since it's a property. The callback actually uses a + variable, so use it like:: + + foo.py__path__(sys_path) + + In case of a package, this returns Python's __path__ attribute, which + is a list of paths (strings). + Raises an AttributeError if the module is not a package. + """ + path = self._get_init_directory() + + if path is None: + raise AttributeError('Only packages have __path__ attributes.') + else: + return self._py__path__ + + @evaluator_method_cache() + def _sub_modules_dict(self): + """ + Lists modules in the directory of this module (if this module is a + package). + """ + path = self._path + names = {} + if path is not None and path.endswith(os.path.sep + '__init__.py'): + mods = pkgutil.iter_modules([os.path.dirname(path)]) + for module_loader, name, is_pkg in mods: + # It's obviously a relative import to the current module. + names[name] = SubModuleName(self, name) + + # TODO add something like this in the future, its cleaner than the + # import hacks. + # ``os.path`` is a hardcoded exception, because it's a + # ``sys.modules`` modification. + # if str(self.name) == 'os': + # names.append(Name('path', parent_context=self)) + + return names + + def py__class__(self): + return compiled.get_special_object(self.evaluator, 'MODULE_CLASS') + + def __repr__(self): + return "<%s: %s@%s-%s>" % ( + self.__class__.__name__, self._string_name, + self.tree_node.start_pos[0], self.tree_node.end_pos[0]) + + diff --git a/pythonFiles/release/jedi/evaluate/context/namespace.py b/pythonFiles/release/jedi/evaluate/context/namespace.py new file mode 100644 index 000000000000..e40c23a5e8ca --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/context/namespace.py @@ -0,0 +1,74 @@ +import os +from itertools import chain + +from jedi._compatibility import use_metaclass +from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass +from jedi.evaluate import imports +from jedi.evaluate.filters import DictFilter, AbstractNameDefinition +from jedi.evaluate.base_context import NO_CONTEXTS, TreeContext + + +class ImplicitNSName(AbstractNameDefinition): + """ + Accessing names for implicit namespace packages should infer to nothing. 
+ This object will prevent Jedi from raising exceptions + """ + def __init__(self, implicit_ns_context, string_name): + self.implicit_ns_context = implicit_ns_context + self.string_name = string_name + + def infer(self): + return NO_CONTEXTS + + def get_root_context(self): + return self.implicit_ns_context + + +class ImplicitNamespaceContext(use_metaclass(CachedMetaClass, TreeContext)): + """ + Provides support for implicit namespace packages + """ + api_type = 'module' + parent_context = None + + def __init__(self, evaluator, fullname): + super(ImplicitNamespaceContext, self).__init__(evaluator, parent_context=None) + self.evaluator = evaluator + self.fullname = fullname + + def get_filters(self, search_global, until_position=None, origin_scope=None): + yield DictFilter(self._sub_modules_dict()) + + @property + @evaluator_method_cache() + def name(self): + string_name = self.py__package__().rpartition('.')[-1] + return ImplicitNSName(self, string_name) + + def py__file__(self): + return None + + def py__package__(self): + """Return the fullname + """ + return self.fullname + + @property + def py__path__(self): + return lambda: [self.paths] + + @evaluator_method_cache() + def _sub_modules_dict(self): + names = {} + + paths = self.paths + file_names = chain.from_iterable(os.listdir(path) for path in paths) + mods = [ + file_name.rpartition('.')[0] if '.' in file_name else file_name + for file_name in file_names + if file_name != '__pycache__' + ] + + for name in mods: + names[name] = imports.SubModuleName(self, name) + return names diff --git a/pythonFiles/release/jedi/evaluate/docstrings.py b/pythonFiles/release/jedi/evaluate/docstrings.py index 84137de59742..f9c1141226e9 100755 --- a/pythonFiles/release/jedi/evaluate/docstrings.py +++ b/pythonFiles/release/jedi/evaluate/docstrings.py @@ -1,11 +1,12 @@ """ Docstrings are another source of information for functions and classes. :mod:`jedi.evaluate.dynamic` tries to find all executions of functions, while -the docstring parsing is much easier. There are two different types of +the docstring parsing is much easier. There are three different types of docstrings that |jedi| understands: - `Sphinx `_ - `Epydoc `_ +- `Numpydoc `_ For example, the sphinx annotation ``:type foo: str`` clearly states that the type of ``foo`` is ``str``. @@ -14,20 +15,22 @@ annotations. 
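+
+For example, the numpydoc block::
+
+    Parameters
+    ----------
+    arr : list of str
+
+is enough for |jedi| to treat ``arr`` as a ``list``.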
""" -from ast import literal_eval import re -from itertools import chain from textwrap import dedent -from jedi.evaluate.cache import memoize_default -from jedi.parser import Parser, load_grammar -from jedi.common import indent_block -from jedi.evaluate.iterable import Array, FakeSequence, AlreadyEvaluated +from parso import parse + +from jedi._compatibility import u +from jedi.evaluate.utils import indent_block +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.base_context import iterator_to_context_set, ContextSet, \ + NO_CONTEXTS +from jedi.evaluate.lazy_context import LazyKnownContexts DOCSTRING_PARAM_PATTERNS = [ r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx - r'\s*:param\s+(\w+)\s+%s:[^\n]+', # Sphinx param with type + r'\s*:param\s+(\w+)\s+%s:[^\n]*', # Sphinx param with type r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc ] @@ -44,23 +47,78 @@ except ImportError: def _search_param_in_numpydocstr(docstr, param_str): return [] + + def _search_return_in_numpydocstr(docstr): + return [] else: def _search_param_in_numpydocstr(docstr, param_str): """Search `docstr` (in numpydoc format) for type(-s) of `param_str`.""" - params = NumpyDocString(docstr)._parsed_data['Parameters'] + try: + # This is a non-public API. If it ever changes we should be + # prepared and return gracefully. + params = NumpyDocString(docstr)._parsed_data['Parameters'] + except (KeyError, AttributeError): + return [] for p_name, p_type, p_descr in params: if p_name == param_str: m = re.match('([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type) if m: p_type = m.group(1) - - if p_type.startswith('{'): - types = set(type(x).__name__ for x in literal_eval(p_type)) - return list(types) - else: - return [p_type] + return list(_expand_typestr(p_type)) return [] + def _search_return_in_numpydocstr(docstr): + """ + Search `docstr` (in numpydoc format) for type(-s) of function returns. + """ + doc = NumpyDocString(docstr) + try: + # This is a non-public API. If it ever changes we should be + # prepared and return gracefully. + returns = doc._parsed_data['Returns'] + returns += doc._parsed_data['Yields'] + except (KeyError, AttributeError): + raise StopIteration + for r_name, r_type, r_descr in returns: + #Return names are optional and if so the type is in the name + if not r_type: + r_type = r_name + for type_ in _expand_typestr(r_type): + yield type_ + + +def _expand_typestr(type_str): + """ + Attempts to interpret the possible types in `type_str` + """ + # Check if alternative types are specified with 'or' + if re.search('\\bor\\b', type_str): + for t in type_str.split('or'): + yield t.split('of')[0].strip() + # Check if like "list of `type`" and set type to list + elif re.search('\\bof\\b', type_str): + yield type_str.split('of')[0] + # Check if type has is a set of valid literal values eg: {'C', 'F', 'A'} + elif type_str.startswith('{'): + node = parse(type_str, version='3.6').children[0] + if node.type == 'atom': + for leaf in node.children[1].children: + if leaf.type == 'number': + if '.' in leaf.value: + yield 'float' + else: + yield 'int' + elif leaf.type == 'string': + if 'b' in leaf.string_prefix.lower(): + yield 'bytes' + else: + yield 'str' + # Ignore everything else. + + # Otherwise just work with what we have. 
+ else: + yield type_str + def _search_param_in_docstr(docstr, param_str): """ @@ -113,12 +171,16 @@ def _strip_rst_role(type_str): return type_str -def _evaluate_for_statement_string(evaluator, string, module): - code = dedent(""" +def _evaluate_for_statement_string(module_context, string): + code = dedent(u(""" def pseudo_docstring_stuff(): - # Create a pseudo function for docstring statements. - %s - """) + ''' + Create a pseudo function for docstring statements. + Need this docstring so that if the below part is not valid Python this + is still a function. + ''' + {0} + """)) if string is None: return [] @@ -130,31 +192,41 @@ def pseudo_docstring_stuff(): # Take the default grammar here, if we load the Python 2.7 grammar here, it # will be impossible to use `...` (Ellipsis) as a token. Docstring types # don't need to conform with the current grammar. - p = Parser(load_grammar(), code % indent_block(string)) + grammar = module_context.evaluator.latest_grammar + module = grammar.parse(code.format(indent_block(string))) try: - pseudo_cls = p.module.subscopes[0] - # First pick suite, then simple_stmt (-2 for DEDENT) and then the node, + funcdef = next(module.iter_funcdefs()) + # First pick suite, then simple_stmt and then the node, # which is also not the last item, because there's a newline. - stmt = pseudo_cls.children[-1].children[-2].children[-2] + stmt = funcdef.children[-1].children[-1].children[-2] except (AttributeError, IndexError): return [] + from jedi.evaluate.context import FunctionContext + function_context = FunctionContext( + module_context.evaluator, + module_context, + funcdef + ) + func_execution_context = function_context.get_function_execution() # Use the module of the param. # TODO this module is not the module of the param in case of a function # call. In that case it's the module of the function call. # stuffed with content from a function call. - pseudo_cls.parent = module - return list(_execute_types_in_stmt(evaluator, stmt)) + return list(_execute_types_in_stmt(func_execution_context, stmt)) -def _execute_types_in_stmt(evaluator, stmt): +def _execute_types_in_stmt(module_context, stmt): """ Executing all types or general elements that we find in a statement. This doesn't include tuple, list and dict literals, because the stuff they contain is executed. (Used as type information). """ - definitions = evaluator.eval_element(stmt) - return chain.from_iterable(_execute_array_values(evaluator, d) for d in definitions) + definitions = module_context.eval_node(stmt) + return ContextSet.from_sets( + _execute_array_values(module_context.evaluator, d) + for d in definitions + ) def _execute_array_values(evaluator, array): @@ -162,34 +234,56 @@ def _execute_array_values(evaluator, array): Tuples indicate that there's not just one return value, but the listed ones. `(str, int)` means that it returns a tuple with both types. 
""" - if isinstance(array, Array): + from jedi.evaluate.context.iterable import SequenceLiteralContext, FakeSequence + if isinstance(array, SequenceLiteralContext): values = [] - for typ in array.values(): - objects = _execute_array_values(evaluator, typ) - values.append(AlreadyEvaluated(objects)) - return [FakeSequence(evaluator, values, array.type)] + for lazy_context in array.py__iter__(): + objects = ContextSet.from_sets( + _execute_array_values(evaluator, typ) + for typ in lazy_context.infer() + ) + values.append(LazyKnownContexts(objects)) + return set([FakeSequence(evaluator, array.array_type, values)]) else: - return evaluator.execute(array) + return array.execute_evaluated() + +@evaluator_method_cache() +def infer_param(execution_context, param): + from jedi.evaluate.context.instance import AnonymousInstanceFunctionExecution -@memoize_default(None, evaluator_is_first_arg=True) -def follow_param(evaluator, param): - func = param.parent_function + def eval_docstring(docstring): + return ContextSet.from_iterable( + p + for param_str in _search_param_in_docstr(docstring, param.name.value) + for p in _evaluate_for_statement_string(module_context, param_str) + ) + module_context = execution_context.get_root_context() + func = param.get_parent_function() + if func.type == 'lambdef': + return NO_CONTEXTS - return [p - for param_str in _search_param_in_docstr(func.raw_doc, - str(param.name)) - for p in _evaluate_for_statement_string(evaluator, param_str, - param.get_parent_until())] + types = eval_docstring(execution_context.py__doc__()) + if isinstance(execution_context, AnonymousInstanceFunctionExecution) and \ + execution_context.function_context.name.string_name == '__init__': + class_context = execution_context.instance.class_context + types |= eval_docstring(class_context.py__doc__()) + return types -@memoize_default(None, evaluator_is_first_arg=True) -def find_return_types(evaluator, func): + +@evaluator_method_cache() +@iterator_to_context_set +def infer_return_types(function_context): def search_return_in_docstr(code): for p in DOCSTRING_RETURN_PATTERNS: match = p.search(code) if match: - return _strip_rst_role(match.group(1)) - - type_str = search_return_in_docstr(func.raw_doc) - return _evaluate_for_statement_string(evaluator, type_str, func.get_parent_until()) + yield _strip_rst_role(match.group(1)) + # Check for numpy style return hint + for type_ in _search_return_in_numpydocstr(code): + yield type_ + + for type_str in search_return_in_docstr(function_context.py__doc__()): + for type_eval in _evaluate_for_statement_string(function_context.get_root_context(), type_str): + yield type_eval diff --git a/pythonFiles/release/jedi/evaluate/dynamic.py b/pythonFiles/release/jedi/evaluate/dynamic.py index 04ed909a1949..7d05000dc9d5 100755 --- a/pythonFiles/release/jedi/evaluate/dynamic.py +++ b/pythonFiles/release/jedi/evaluate/dynamic.py @@ -14,31 +14,40 @@ def foo(bar): - |Jedi| sees a param - search for function calls named ``foo`` -- execute these calls and check the input. This work with a ``ParamListener``. +- execute these calls and check the input. 
""" -from itertools import chain -from jedi._compatibility import unicode -from jedi.parser import tree +from parso.python import tree from jedi import settings from jedi import debug -from jedi.evaluate.cache import memoize_default +from jedi.evaluate.cache import evaluator_function_cache from jedi.evaluate import imports +from jedi.evaluate.arguments import TreeArguments +from jedi.evaluate.param import create_default_params +from jedi.evaluate.helpers import is_stdlib_path +from jedi.evaluate.utils import to_list +from jedi.parser_utils import get_parent_scope +from jedi.evaluate.context import ModuleContext, instance +from jedi.evaluate.base_context import ContextSet -class ParamListener(object): + +MAX_PARAM_SEARCHES = 20 + + +class MergedExecutedParams(object): """ - This listener is used to get the params for a function. + Simulates being a parameter while actually just being multiple params. """ - def __init__(self): - self.param_possibilities = [] + def __init__(self, executed_params): + self._executed_params = executed_params - def execute(self, params): - self.param_possibilities += params + def infer(self): + return ContextSet.from_sets(p.infer() for p in self._executed_params) @debug.increase_indent -def search_params(evaluator, param): +def search_params(evaluator, execution_context, funcdef): """ A dynamic search for param values. If you try to complete a type: @@ -52,95 +61,143 @@ def search_params(evaluator, param): is. """ if not settings.dynamic_params: - return [] + return create_default_params(execution_context, funcdef) - func = param.get_parent_until(tree.Function) - debug.dbg('Dynamic param search for %s in %s.', param, str(func.name)) - # Compare the param names. - names = [n for n in search_function_call(evaluator, func) - if n.value == param.name.value] - # Evaluate the ExecutedParams to types. - result = list(chain.from_iterable(n.parent.eval(evaluator) for n in names)) - debug.dbg('Dynamic param result %s', result) - return result + evaluator.dynamic_params_depth += 1 + try: + path = execution_context.get_root_context().py__file__() + if path is not None and is_stdlib_path(path): + # We don't want to search for usages in the stdlib. Usually people + # don't work with it (except if you are a core maintainer, sorry). + # This makes everything slower. Just disable it and run the tests, + # you will see the slowdown, especially in 3.6. + return create_default_params(execution_context, funcdef) + + debug.dbg('Dynamic param search in %s.', funcdef.name.value, color='MAGENTA') + + module_context = execution_context.get_root_context() + function_executions = _search_function_executions( + evaluator, + module_context, + funcdef + ) + if function_executions: + zipped_params = zip(*list( + function_execution.get_params() + for function_execution in function_executions + )) + params = [MergedExecutedParams(executed_params) for executed_params in zipped_params] + # Evaluate the ExecutedParams to types. + else: + return create_default_params(execution_context, funcdef) + debug.dbg('Dynamic param result finished', color='MAGENTA') + return params + finally: + evaluator.dynamic_params_depth -= 1 -@memoize_default([], evaluator_is_first_arg=True) -def search_function_call(evaluator, func): +@evaluator_function_cache(default=None) +@to_list +def _search_function_executions(evaluator, module_context, funcdef): """ Returns a list of param names. 
""" - from jedi.evaluate import representation as er - - def get_params_for_module(module): - """ - Returns the values of a param, or an empty array. - """ - @memoize_default([], evaluator_is_first_arg=True) - def get_posibilities(evaluator, module, func_name): - try: - names = module.used_names[func_name] - except KeyError: - return [] - - for name in names: - parent = name.parent - if tree.is_node(parent, 'trailer'): - parent = parent.parent - - trailer = None - if tree.is_node(parent, 'power'): - for t in parent.children[1:]: - if t == '**': - break - if t.start_pos > name.start_pos and t.children[0] == '(': - trailer = t - break - if trailer is not None: - types = evaluator.goto_definition(name) - - # We have to remove decorators, because they are not the - # "original" functions, this way we can easily compare. - # At the same time we also have to remove InstanceElements. - undec = [] - for escope in types: - if escope.isinstance(er.Function, er.Instance) \ - and escope.decorates is not None: - undec.append(escope.decorates) - elif isinstance(escope, er.InstanceElement): - undec.append(escope.var) - else: - undec.append(escope) - - if evaluator.wrap(compare) in undec: - # Only if we have the correct function we execute - # it, otherwise just ignore it. - evaluator.eval_trailer(types, trailer) - return listener.param_possibilities - return get_posibilities(evaluator, module, func_name) - - current_module = func.get_parent_until() - func_name = unicode(func.name) - compare = func - if func_name == '__init__': - cls = func.get_parent_scope() + func_string_name = funcdef.name.value + compare_node = funcdef + if func_string_name == '__init__': + cls = get_parent_scope(funcdef) if isinstance(cls, tree.Class): - func_name = unicode(cls.name) - compare = cls - - # add the listener - listener = ParamListener() - func.listeners.add(listener) - + func_string_name = cls.name.value + compare_node = cls + + found_executions = False + i = 0 + for for_mod_context in imports.get_modules_containing_name( + evaluator, [module_context], func_string_name): + if not isinstance(module_context, ModuleContext): + return + for name, trailer in _get_possible_nodes(for_mod_context, func_string_name): + i += 1 + + # This is a simple way to stop Jedi's dynamic param recursion + # from going wild: The deeper Jedi's in the recursion, the less + # code should be evaluated. + if i * evaluator.dynamic_params_depth > MAX_PARAM_SEARCHES: + return + + random_context = evaluator.create_context(for_mod_context, name) + for function_execution in _check_name_for_execution( + evaluator, random_context, compare_node, name, trailer): + found_executions = True + yield function_execution + + # If there are results after processing a module, we're probably + # good to process. This is a speed optimization. + if found_executions: + return + + +def _get_possible_nodes(module_context, func_string_name): try: - result = [] - # This is like backtracking: Get the first possible result. - for mod in imports.get_modules_containing_name(evaluator, [current_module], func_name): - result = get_params_for_module(mod) - if result: - break - finally: - # cleanup: remove the listener; important: should not stick. 
- func.listeners.remove(listener) - - return result + names = module_context.tree_node.get_used_names()[func_string_name] + except KeyError: + return + + for name in names: + bracket = name.get_next_leaf() + trailer = bracket.parent + if trailer.type == 'trailer' and bracket == '(': + yield name, trailer + + +def _check_name_for_execution(evaluator, context, compare_node, name, trailer): + from jedi.evaluate.context.function import FunctionExecutionContext + + def create_func_excs(): + arglist = trailer.children[1] + if arglist == ')': + arglist = () + args = TreeArguments(evaluator, context, arglist, trailer) + if value_node.type == 'funcdef': + yield value.get_function_execution(args) + else: + created_instance = instance.TreeInstance( + evaluator, + value.parent_context, + value, + args + ) + for execution in created_instance.create_init_executions(): + yield execution + + for value in evaluator.goto_definitions(context, name): + value_node = value.tree_node + if compare_node == value_node: + for func_execution in create_func_excs(): + yield func_execution + elif isinstance(value.parent_context, FunctionExecutionContext) and \ + compare_node.type == 'funcdef': + # Here we're trying to find decorators by checking the first + # parameter. It's not very generic though. Should find a better + # solution that also applies to nested decorators. + params = value.parent_context.get_params() + if len(params) != 1: + continue + values = params[0].infer() + nodes = [v.tree_node for v in values] + if nodes == [compare_node]: + # Found a decorator. + module_context = context.get_root_context() + execution_context = next(create_func_excs()) + for name, trailer in _get_possible_nodes(module_context, params[0].string_name): + if value_node.start_pos < name.start_pos < value_node.end_pos: + random_context = evaluator.create_context(execution_context, name) + iterator = _check_name_for_execution( + evaluator, + random_context, + compare_node, + name, + trailer + ) + for function_execution in iterator: + yield function_execution diff --git a/pythonFiles/release/jedi/evaluate/filters.py b/pythonFiles/release/jedi/evaluate/filters.py new file mode 100644 index 000000000000..35dff9dace65 --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/filters.py @@ -0,0 +1,434 @@ +""" +Filters are objects that you can use to filter names in different scopes. They +are needed for name resolution. +""" +from abc import abstractmethod + +from parso.tree import search_ancestor + +from jedi._compatibility import is_py3 +from jedi.evaluate import flow_analysis +from jedi.evaluate.base_context import ContextSet, Context +from jedi.parser_utils import get_parent_scope +from jedi.evaluate.utils import to_list + + +class AbstractNameDefinition(object): + start_pos = None + string_name = None + parent_context = None + tree_name = None + + @abstractmethod + def infer(self): + raise NotImplementedError + + @abstractmethod + def goto(self): + # Typically names are already definitions and therefore a goto on that + # name will always result on itself. 
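+        # Tree names (see AbstractTreeName below) override this and ask the
+        # evaluator for the actual definitions instead.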
+ return set([self]) + + def get_root_context(self): + return self.parent_context.get_root_context() + + def __repr__(self): + if self.start_pos is None: + return '<%s: %s>' % (self.__class__.__name__, self.string_name) + return '<%s: %s@%s>' % (self.__class__.__name__, self.string_name, self.start_pos) + + def execute(self, arguments): + return self.infer().execute(arguments) + + def execute_evaluated(self, *args, **kwargs): + return self.infer().execute_evaluated(*args, **kwargs) + + @property + def api_type(self): + return self.parent_context.api_type + + +class AbstractTreeName(AbstractNameDefinition): + def __init__(self, parent_context, tree_name): + self.parent_context = parent_context + self.tree_name = tree_name + + def goto(self): + return self.parent_context.evaluator.goto(self.parent_context, self.tree_name) + + @property + def string_name(self): + return self.tree_name.value + + @property + def start_pos(self): + return self.tree_name.start_pos + + +class ContextNameMixin(object): + def infer(self): + return ContextSet(self._context) + + def get_root_context(self): + if self.parent_context is None: + return self._context + return super(ContextNameMixin, self).get_root_context() + + @property + def api_type(self): + return self._context.api_type + + +class ContextName(ContextNameMixin, AbstractTreeName): + def __init__(self, context, tree_name): + super(ContextName, self).__init__(context.parent_context, tree_name) + self._context = context + + +class TreeNameDefinition(AbstractTreeName): + _API_TYPES = dict( + import_name='module', + import_from='module', + funcdef='function', + param='param', + classdef='class', + ) + + def infer(self): + # Refactor this, should probably be here. + from jedi.evaluate.syntax_tree import tree_name_to_contexts + return tree_name_to_contexts(self.parent_context.evaluator, self.parent_context, self.tree_name) + + @property + def api_type(self): + definition = self.tree_name.get_definition(import_name_always=True) + if definition is None: + return 'statement' + return self._API_TYPES.get(definition.type, 'statement') + + +class ParamName(AbstractTreeName): + api_type = 'param' + + def __init__(self, parent_context, tree_name): + self.parent_context = parent_context + self.tree_name = tree_name + + def infer(self): + return self.get_param().infer() + + def get_param(self): + params = self.parent_context.get_params() + param_node = search_ancestor(self.tree_name, 'param') + return params[param_node.position_index] + + +class AnonymousInstanceParamName(ParamName): + def infer(self): + param_node = search_ancestor(self.tree_name, 'param') + # TODO I think this should not belong here. It's not even really true, + # because classmethod and other descriptors can change it. + if param_node.position_index == 0: + # This is a speed optimization, to return the self param (because + # it's known). This only affects anonymous instances. 
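+            # In other words, ``self`` infers straight to the instance; no
+            # execution of the function is necessary.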
+ return ContextSet(self.parent_context.instance) + else: + return self.get_param().infer() + + +class AbstractFilter(object): + _until_position = None + + def _filter(self, names): + if self._until_position is not None: + return [n for n in names if n.start_pos < self._until_position] + return names + + @abstractmethod + def get(self, name): + raise NotImplementedError + + @abstractmethod + def values(self): + raise NotImplementedError + + +class AbstractUsedNamesFilter(AbstractFilter): + name_class = TreeNameDefinition + + def __init__(self, context, parser_scope): + self._parser_scope = parser_scope + self._used_names = self._parser_scope.get_root_node().get_used_names() + self.context = context + + def get(self, name): + try: + names = self._used_names[str(name)] + except KeyError: + return [] + + return self._convert_names(self._filter(names)) + + def _convert_names(self, names): + return [self.name_class(self.context, name) for name in names] + + def values(self): + return self._convert_names(name for name_list in self._used_names.values() + for name in self._filter(name_list)) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.context) + + +class ParserTreeFilter(AbstractUsedNamesFilter): + def __init__(self, evaluator, context, node_context=None, until_position=None, + origin_scope=None): + """ + node_context is an option to specify a second context for use cases + like the class mro where the parent class of a new name would be the + context, but for some type inference it's important to have a local + context of the other classes. + """ + if node_context is None: + node_context = context + super(ParserTreeFilter, self).__init__(context, node_context.tree_node) + self._node_context = node_context + self._origin_scope = origin_scope + self._until_position = until_position + + def _filter(self, names): + names = super(ParserTreeFilter, self)._filter(names) + names = [n for n in names if self._is_name_reachable(n)] + return list(self._check_flows(names)) + + def _is_name_reachable(self, name): + if not name.is_definition(): + return False + parent = name.parent + if parent.type == 'trailer': + return False + base_node = parent if parent.type in ('classdef', 'funcdef') else name + return get_parent_scope(base_node) == self._parser_scope + + def _check_flows(self, names): + for name in sorted(names, key=lambda name: name.start_pos, reverse=True): + check = flow_analysis.reachability_check( + self._node_context, self._parser_scope, name, self._origin_scope + ) + if check is not flow_analysis.UNREACHABLE: + yield name + + if check is flow_analysis.REACHABLE: + break + + +class FunctionExecutionFilter(ParserTreeFilter): + param_name = ParamName + + def __init__(self, evaluator, context, node_context=None, + until_position=None, origin_scope=None): + super(FunctionExecutionFilter, self).__init__( + evaluator, + context, + node_context, + until_position, + origin_scope + ) + + @to_list + def _convert_names(self, names): + for name in names: + param = search_ancestor(name, 'param') + if param: + yield self.param_name(self.context, name) + else: + yield TreeNameDefinition(self.context, name) + + +class AnonymousInstanceFunctionExecutionFilter(FunctionExecutionFilter): + param_name = AnonymousInstanceParamName + + +class GlobalNameFilter(AbstractUsedNamesFilter): + def __init__(self, context, parser_scope): + super(GlobalNameFilter, self).__init__(context, parser_scope) + + @to_list + def _filter(self, names): + for name in names: + if name.parent.type == 
'global_stmt': + yield name + + +class DictFilter(AbstractFilter): + def __init__(self, dct): + self._dct = dct + + def get(self, name): + try: + value = self._convert(name, self._dct[str(name)]) + except KeyError: + return [] + + return list(self._filter([value])) + + def values(self): + return self._filter(self._convert(*item) for item in self._dct.items()) + + def _convert(self, name, value): + return value + + +class _BuiltinMappedMethod(Context): + """``Generator.__next__`` ``dict.values`` methods and so on.""" + api_type = 'function' + + def __init__(self, builtin_context, method, builtin_func): + super(_BuiltinMappedMethod, self).__init__( + builtin_context.evaluator, + parent_context=builtin_context + ) + self._method = method + self._builtin_func = builtin_func + + def py__call__(self, params): + return self._method(self.parent_context) + + def __getattr__(self, name): + return getattr(self._builtin_func, name) + + +class SpecialMethodFilter(DictFilter): + """ + A filter for methods that are defined in this module on the corresponding + classes like Generator (for __next__, etc). + """ + class SpecialMethodName(AbstractNameDefinition): + api_type = 'function' + + def __init__(self, parent_context, string_name, callable_, builtin_context): + self.parent_context = parent_context + self.string_name = string_name + self._callable = callable_ + self._builtin_context = builtin_context + + def infer(self): + filter = next(self._builtin_context.get_filters()) + # We can take the first index, because on builtin methods there's + # always only going to be one name. The same is true for the + # inferred values. + builtin_func = next(iter(filter.get(self.string_name)[0].infer())) + return ContextSet(_BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)) + + def __init__(self, context, dct, builtin_context): + super(SpecialMethodFilter, self).__init__(dct) + self.context = context + self._builtin_context = builtin_context + """ + This context is what will be used to introspect the name, where as the + other context will be used to execute the function. + + We distinguish, because we have to. + """ + + def _convert(self, name, value): + return self.SpecialMethodName(self.context, name, value, self._builtin_context) + + +def has_builtin_methods(cls): + base_dct = {} + # Need to care properly about inheritance. Builtin Methods should not get + # lost, just because they are not mentioned in a class. + for base_cls in reversed(cls.__bases__): + try: + base_dct.update(base_cls.builtin_methods) + except AttributeError: + pass + + cls.builtin_methods = base_dct + for func in cls.__dict__.values(): + try: + cls.builtin_methods.update(func.registered_builtin_methods) + except AttributeError: + pass + return cls + + +def register_builtin_method(method_name, python_version_match=None): + def wrapper(func): + if python_version_match and python_version_match != 2 + int(is_py3): + # Some functions do only apply to certain versions. + return func + dct = func.__dict__.setdefault('registered_builtin_methods', {}) + dct[method_name] = func + return func + return wrapper + + +def get_global_filters(evaluator, context, until_position, origin_scope): + """ + Returns all filters in order of priority for name resolution. + + For global name lookups. The filters will handle name resolution + themselves, but here we gather possible filters downwards. + + >>> from jedi._compatibility import u, no_unicode_pprint + >>> from jedi import Script + >>> script = Script(u(''' + ... x = ['a', 'b', 'c'] + ... 
def func(): + ... y = None + ... ''')) + >>> module_node = script._get_module_node() + >>> scope = next(module_node.iter_funcdefs()) + >>> scope + + >>> context = script._get_module().create_context(scope) + >>> filters = list(get_global_filters(context.evaluator, context, (4, 0), None)) + + First we get the names names from the function scope. + + >>> no_unicode_pprint(filters[0]) + > + >>> sorted(str(n) for n in filters[0].values()) + ['', ''] + >>> filters[0]._until_position + (4, 0) + + Then it yields the names from one level "lower". In this example, this is + the module scope. As a side note, you can see, that the position in the + filter is now None, because typically the whole module is loaded before the + function is called. + + >>> filters[1].values() # global names -> there are none in our example. + [] + >>> list(filters[2].values()) # package modules -> Also empty. + [] + >>> sorted(name.string_name for name in filters[3].values()) # Module attributes + ['__doc__', '__file__', '__name__', '__package__'] + >>> print(filters[1]._until_position) + None + + Finally, it yields the builtin filter, if `include_builtin` is + true (default). + + >>> filters[4].values() #doctest: +ELLIPSIS + [, ...] + """ + from jedi.evaluate.context.function import FunctionExecutionContext + while context is not None: + # Names in methods cannot be resolved within the class. + for filter in context.get_filters( + search_global=True, + until_position=until_position, + origin_scope=origin_scope): + yield filter + if isinstance(context, FunctionExecutionContext): + # The position should be reset if the current scope is a function. + until_position = None + + context = context.parent_context + + # Add builtins to the global scope. + for filter in evaluator.BUILTINS.get_filters(search_global=True): + yield filter diff --git a/pythonFiles/release/jedi/evaluate/finder.py b/pythonFiles/release/jedi/evaluate/finder.py index c112f971b277..96032ae9b792 100755 --- a/pythonFiles/release/jedi/evaluate/finder.py +++ b/pythonFiles/release/jedi/evaluate/finder.py @@ -3,6 +3,9 @@ Python. The name resolution is quite complicated with descripter, ``__getattribute__``, ``__getattr__``, ``global``, etc. +If you want to understand name resolution, please read the first few chapters +in http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/. + Flow checks +++++++++++ @@ -11,371 +14,172 @@ Unfortunately every other thing is being ignored (e.g. a == '' would be easy to check for -> a is a string). There's big potential in these checks. """ -from itertools import chain -from jedi._compatibility import unicode, u -from jedi.parser import tree +from parso.python import tree +from parso.tree import search_ancestor from jedi import debug -from jedi import common from jedi import settings -from jedi.evaluate import representation as er -from jedi.evaluate import dynamic +from jedi.evaluate.context import AbstractInstanceContext from jedi.evaluate import compiled -from jedi.evaluate import docstrings -from jedi.evaluate import iterable -from jedi.evaluate import imports from jedi.evaluate import analysis from jedi.evaluate import flow_analysis -from jedi.evaluate import param +from jedi.evaluate.arguments import TreeArguments from jedi.evaluate import helpers -from jedi.evaluate.cache import memoize_default - - -def filter_after_position(names, position): - """ - Removes all names after a certain position. If position is None, just - returns the names list. 
- """ - if position is None: - return names - - names_new = [] - for n in names: - # Filter positions and also allow list comprehensions and lambdas. - if n.start_pos[0] is not None and n.start_pos < position \ - or isinstance(n.get_definition(), (tree.CompFor, tree.Lambda)): - names_new.append(n) - return names_new - - -def filter_definition_names(names, origin, position=None): - """ - Filter names that are actual definitions in a scope. Names that are just - used will be ignored. - """ - # Just calculate the scope from the first - stmt = names[0].get_definition() - scope = stmt.get_parent_scope() - - if not (isinstance(scope, er.FunctionExecution) - and isinstance(scope.base, er.LambdaWrapper)): - names = filter_after_position(names, position) - names = [name for name in names if name.is_definition()] - - # Private name mangling (compile.c) disallows access on names - # preceeded by two underscores `__` if used outside of the class. Names - # that also end with two underscores (e.g. __id__) are not affected. - for name in list(names): - if name.value.startswith('__') and not name.value.endswith('__'): - if filter_private_variable(scope, origin): - names.remove(name) - return names +from jedi.evaluate.context import iterable +from jedi.evaluate.filters import get_global_filters, TreeNameDefinition +from jedi.evaluate.base_context import ContextSet +from jedi.parser_utils import is_scope, get_parent_scope class NameFinder(object): - def __init__(self, evaluator, scope, name_str, position=None): + def __init__(self, evaluator, context, name_context, name_or_str, + position=None, analysis_errors=True): self._evaluator = evaluator # Make sure that it's not just a syntax tree node. - self.scope = evaluator.wrap(scope) - self.name_str = name_str - self.position = position + self._context = context + self._name_context = name_context + self._name = name_or_str + if isinstance(name_or_str, tree.Name): + self._string_name = name_or_str.value + else: + self._string_name = name_or_str + self._position = position + self._found_predefined_types = None + self._analysis_errors = analysis_errors @debug.increase_indent - def find(self, scopes, search_global=False): - # TODO rename scopes to names_dicts - names = self.filter_name(scopes) - types = self._names_to_types(names, search_global) - - if not names and not types \ - and not (isinstance(self.name_str, tree.Name) - and isinstance(self.name_str.parent.parent, tree.Param)): - if not isinstance(self.name_str, (str, unicode)): # TODO Remove? - if search_global: - message = ("NameError: name '%s' is not defined." - % self.name_str) - analysis.add(self._evaluator, 'name-error', self.name_str, - message) + def find(self, filters, attribute_lookup): + """ + :params bool attribute_lookup: Tell to logic if we're accessing the + attribute or the contents of e.g. a function. 
+ """ + names = self.filter_name(filters) + if self._found_predefined_types is not None and names: + check = flow_analysis.reachability_check( + self._context, self._context.tree_node, self._name) + if check is flow_analysis.UNREACHABLE: + return ContextSet() + return self._found_predefined_types + + types = self._names_to_types(names, attribute_lookup) + + if not names and self._analysis_errors and not types \ + and not (isinstance(self._name, tree.Name) and + isinstance(self._name.parent.parent, tree.Param)): + if isinstance(self._name, tree.Name): + if attribute_lookup: + analysis.add_attribute_error( + self._name_context, self._context, self._name) else: - analysis.add_attribute_error(self._evaluator, - self.scope, self.name_str) + message = ("NameError: name '%s' is not defined." + % self._string_name) + analysis.add(self._name_context, 'name-error', self._name, message) - debug.dbg('finder._names_to_types: %s -> %s', names, types) return types - def scopes(self, search_global=False): - if search_global: - return global_names_dict_generator(self._evaluator, self.scope, self.position) + def _get_origin_scope(self): + if isinstance(self._name, tree.Name): + scope = self._name + while scope.parent is not None: + # TODO why if classes? + if not isinstance(scope, tree.Scope): + break + scope = scope.parent + return scope else: - return ((n, None) for n in self.scope.names_dicts(search_global)) - - def names_dict_lookup(self, names_dict, position): - def get_param(scope, el): - if isinstance(el.get_parent_until(tree.Param), tree.Param): - return scope.param_by_name(str(el)) - return el - - search_str = str(self.name_str) - try: - names = names_dict[search_str] - if not names: # We want names, otherwise stop. - return [] - except KeyError: - return [] + return None - names = filter_definition_names(names, self.name_str, position) - - name_scope = None - # Only the names defined in the last position are valid definitions. - last_names = [] - for name in reversed(sorted(names, key=lambda name: name.start_pos)): - stmt = name.get_definition() - name_scope = self._evaluator.wrap(stmt.get_parent_scope()) - - if isinstance(self.scope, er.Instance) and not isinstance(name_scope, er.Instance): - # Instances should not be checked for positioning, because we - # don't know in which order the functions are called. - last_names.append(name) - continue - - if isinstance(name_scope, compiled.CompiledObject): - # Let's test this. TODO need comment. shouldn't this be - # filtered before? - last_names.append(name) - continue - - if isinstance(name, compiled.CompiledName) \ - or isinstance(name, er.InstanceName) and isinstance(name._origin_name, compiled.CompiledName): - last_names.append(name) - continue - - if isinstance(self.name_str, tree.Name): - origin_scope = self.name_str.get_parent_until(tree.Scope, reverse=True) - else: - origin_scope = None - if isinstance(stmt.parent, compiled.CompiledObject): - # TODO seriously? this is stupid. 
- continue - check = flow_analysis.break_check(self._evaluator, name_scope, - stmt, origin_scope) - if check is not flow_analysis.UNREACHABLE: - last_names.append(name) - if check is flow_analysis.REACHABLE: - break - - if isinstance(name_scope, er.FunctionExecution): - # Replace params - return [get_param(name_scope, n) for n in last_names] - return last_names + def get_filters(self, search_global=False): + origin_scope = self._get_origin_scope() + if search_global: + return get_global_filters(self._evaluator, self._context, self._position, origin_scope) + else: + return self._context.get_filters(search_global, self._position, origin_scope=origin_scope) - def filter_name(self, names_dicts): + def filter_name(self, filters): """ Searches names that are defined in a scope (the different - `names_dicts`), until a name fits. + ``filters``), until a name fits. """ names = [] - for names_dict, position in names_dicts: - names = self.names_dict_lookup(names_dict, position) + if self._context.predefined_names: + # TODO is this ok? node might not always be a tree.Name + node = self._name + while node is not None and not is_scope(node): + node = node.parent + if node.type in ("if_stmt", "for_stmt", "comp_for"): + try: + name_dict = self._context.predefined_names[node] + types = name_dict[self._string_name] + except KeyError: + continue + else: + self._found_predefined_types = types + break + + for filter in filters: + names = filter.get(self._string_name) if names: + if len(names) == 1: + n, = names + if isinstance(n, TreeNameDefinition): + # Something somewhere went terribly wrong. This + # typically happens when using goto on an import in an + # __init__ file. I think we need a better solution, but + # it's kind of hard, because for Jedi it's not clear + # that that name has not been defined, yet. + if n.tree_name == self._name: + if self._name.get_definition().type == 'import_from': + continue break - debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self.name_str, - self.scope, u(names), self.position) - return list(self._clean_names(names)) - - def _clean_names(self, names): - """ - ``NameFinder.filter_name`` should only output names with correct - wrapper parents. We don't want to see AST classes out in the - evaluation, so remove them already here! - """ - for n in names: - definition = n.parent - if isinstance(definition, (tree.Function, tree.Class, tree.Module)): - yield self._evaluator.wrap(definition).name - else: - yield n + debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self._string_name, + self._context, names, self._position) + return list(names) def _check_getattr(self, inst): """Checks for both __getattr__ and __getattribute__ methods""" - result = [] # str is important, because it shouldn't be `Name`! - name = compiled.create(self._evaluator, str(self.name_str)) - with common.ignored(KeyError): - result = inst.execute_subscope_by_name('__getattr__', name) - if not result: - # this is a little bit special. `__getattribute__` is executed - # before anything else. But: I know no use case, where this - # could be practical and the jedi would return wrong types. If - # you ever have something, let me know! - with common.ignored(KeyError): - result = inst.execute_subscope_by_name('__getattribute__', name) - return result - - def _names_to_types(self, names, search_global): - types = [] + name = compiled.create(self._evaluator, self._string_name) + + # This is a little bit special. `__getattribute__` is in Python + # executed before `__getattr__`. 
But: I know no use case, where + # this could be practical and where Jedi would return wrong types. + # If you ever find something, let me know! + # We are inversing this, because a hand-crafted `__getattribute__` + # could still call another hand-crafted `__getattr__`, but not the + # other way around. + names = (inst.get_function_slot_names('__getattr__') or + inst.get_function_slot_names('__getattribute__')) + return inst.execute_function_slots(names, name) + + def _names_to_types(self, names, attribute_lookup): + contexts = ContextSet.from_sets(name.infer() for name in names) + + debug.dbg('finder._names_to_types: %s -> %s', names, contexts) + if not names and isinstance(self._context, AbstractInstanceContext): + # handling __getattr__ / __getattribute__ + return self._check_getattr(self._context) # Add isinstance and other if/assert knowledge. - if isinstance(self.name_str, tree.Name): - # Ignore FunctionExecution parents for now. - flow_scope = self.name_str - until = flow_scope.get_parent_until(er.FunctionExecution) - while not isinstance(until, er.FunctionExecution): - flow_scope = flow_scope.get_parent_scope(include_flows=True) - if flow_scope is None: - break - # TODO check if result is in scope -> no evaluation necessary - n = check_flow_information(self._evaluator, flow_scope, - self.name_str, self.position) - if n: + if not contexts and isinstance(self._name, tree.Name) and \ + not isinstance(self._name_context, AbstractInstanceContext): + flow_scope = self._name + base_node = self._name_context.tree_node + if base_node.type == 'comp_for': + return contexts + while True: + flow_scope = get_parent_scope(flow_scope, include_flows=True) + n = _check_flow_information(self._name_context, flow_scope, + self._name, self._position) + if n is not None: return n - - for name in names: - new_types = _name_to_types(self._evaluator, name, self.scope) - if isinstance(self.scope, (er.Class, er.Instance)) and not search_global: - types += self._resolve_descriptors(name, new_types) - else: - types += new_types - if not names and isinstance(self.scope, er.Instance): - # handling __getattr__ / __getattribute__ - types = self._check_getattr(self.scope) - - return types - - def _resolve_descriptors(self, name, types): - # The name must not be in the dictionary, but part of the class - # definition. __get__ is only called if the descriptor is defined in - # the class dictionary. 
- name_scope = name.get_definition().get_parent_scope() - if not isinstance(name_scope, (er.Instance, tree.Class)): - return types - - result = [] - for r in types: - try: - desc_return = r.get_descriptor_returns - except AttributeError: - result.append(r) - else: - result += desc_return(self.scope) - return result - - -@memoize_default([], evaluator_is_first_arg=True) -def _name_to_types(evaluator, name, scope): - types = [] - typ = name.get_definition() - if typ.isinstance(tree.ForStmt): - for_types = evaluator.eval_element(typ.children[3]) - for_types = iterable.get_iterator_types(for_types) - types += check_tuple_assignments(for_types, name) - elif typ.isinstance(tree.CompFor): - for_types = evaluator.eval_element(typ.children[3]) - for_types = iterable.get_iterator_types(for_types) - types += check_tuple_assignments(for_types, name) - elif isinstance(typ, tree.Param): - types += _eval_param(evaluator, typ, scope) - elif typ.isinstance(tree.ExprStmt): - types += _remove_statements(evaluator, typ, name) - elif typ.isinstance(tree.WithStmt): - types += evaluator.eval_element(typ.node_from_name(name)) - elif isinstance(typ, tree.Import): - types += imports.ImportWrapper(evaluator, name).follow() - elif isinstance(typ, tree.GlobalStmt): - # TODO theoretically we shouldn't be using search_global here, it - # doesn't make sense, because it's a local search (for that name)! - # However, globals are not that important and resolving them doesn't - # guarantee correctness in any way, because we don't check for when - # something is executed. - types += evaluator.find_types(typ.get_parent_scope(), str(name), - search_global=True) - elif isinstance(typ, tree.TryStmt): - # TODO an exception can also be a tuple. Check for those. - # TODO check for types that are not classes and add it to - # the static analysis report. - exceptions = evaluator.eval_element(name.prev_sibling().prev_sibling()) - types = list(chain.from_iterable( - evaluator.execute(t) for t in exceptions)) - else: - if typ.isinstance(er.Function): - typ = typ.get_decorated_func() - types.append(typ) - return types - - -def _remove_statements(evaluator, stmt, name): - """ - This is the part where statements are being stripped. - - Due to lazy evaluation, statements like a = func; b = a; b() have to be - evaluated. - """ - types = [] - # Remove the statement docstr stuff for now, that has to be - # implemented with the evaluator class. - #if stmt.docstr: - #res_new.append(stmt) - - check_instance = None - if isinstance(stmt, er.InstanceElement) and stmt.is_class_var: - check_instance = stmt.instance - stmt = stmt.var - - types += evaluator.eval_statement(stmt, seek_name=name) - - if check_instance is not None: - # class renames - types = [er.get_instance_el(evaluator, check_instance, a, True) - if isinstance(a, (er.Function, tree.Function)) - else a for a in types] - return types - - -def _eval_param(evaluator, param, scope): - res_new = [] - func = param.get_parent_scope() - - cls = func.parent.get_parent_until((tree.Class, tree.Function)) - - from jedi.evaluate.param import ExecutedParam, Arguments - if isinstance(cls, tree.Class) and param.position_nr == 0 \ - and not isinstance(param, ExecutedParam): - # This is where we add self - if it has never been - # instantiated. 
- if isinstance(scope, er.InstanceElement): - res_new.append(scope.instance) - else: - inst = er.Instance(evaluator, evaluator.wrap(cls), - Arguments(evaluator, ()), is_generated=True) - res_new.append(inst) - return res_new - - # Instances are typically faked, if the instance is not called from - # outside. Here we check it for __init__ functions and return. - if isinstance(func, er.InstanceElement) \ - and func.instance.is_generated and str(func.name) == '__init__': - param = func.var.params[param.position_nr] - - # Add docstring knowledge. - doc_params = docstrings.follow_param(evaluator, param) - if doc_params: - return doc_params - - if isinstance(param, ExecutedParam): - return res_new + param.eval(evaluator) - else: - # Param owns no information itself. - res_new += dynamic.search_params(evaluator, param) - if not res_new: - if param.stars: - t = 'tuple' if param.stars == 1 else 'dict' - typ = evaluator.find_types(compiled.builtin, t)[0] - res_new = evaluator.execute(typ) - if param.default: - res_new += evaluator.eval_element(param.default) - return res_new + if flow_scope == base_node: + break + return contexts -def check_flow_information(evaluator, flow, search_name, pos): +def _check_flow_information(context, flow, search_name, pos): """ Try to find out the type of a variable just with the information that is given by the flows: e.g. It is also responsible for assert checks.:: @@ -387,161 +191,68 @@ def check_flow_information(evaluator, flow, search_name, pos): if not settings.dynamic_flow_information: return None - result = [] - if flow.is_scope(): + result = None + if is_scope(flow): # Check for asserts. + module_node = flow.get_root_node() try: - names = reversed(flow.names_dict[search_name.value]) - except (KeyError, AttributeError): - names = [] + names = module_node.get_used_names()[search_name.value] + except KeyError: + return None + names = reversed([ + n for n in names + if flow.start_pos <= n.start_pos < (pos or flow.end_pos) + ]) for name in names: - ass = name.get_parent_until(tree.AssertStmt) - if isinstance(ass, tree.AssertStmt) and pos is not None and ass.start_pos < pos: - result = _check_isinstance_type(evaluator, ass.assertion(), search_name) - if result: - break - - if isinstance(flow, (tree.IfStmt, tree.WhileStmt)): - element = flow.children[1] - result = _check_isinstance_type(evaluator, element, search_name) + ass = search_ancestor(name, 'assert_stmt') + if ass is not None: + result = _check_isinstance_type(context, ass.assertion, search_name) + if result is not None: + return result + + if flow.type in ('if_stmt', 'while_stmt'): + potential_ifs = [c for c in flow.children[1::4] if c != ':'] + for if_test in reversed(potential_ifs): + if search_name.start_pos > if_test.end_pos: + return _check_isinstance_type(context, if_test, search_name) return result -def _check_isinstance_type(evaluator, element, search_name): +def _check_isinstance_type(context, element, search_name): try: - assert element.type == 'power' + assert element.type in ('power', 'atom_expr') # this might be removed if we analyze and, etc assert len(element.children) == 2 first, trailer = element.children - assert isinstance(first, tree.Name) and first.value == 'isinstance' + assert first.type == 'name' and first.value == 'isinstance' assert trailer.type == 'trailer' and trailer.children[0] == '(' assert len(trailer.children) == 3 # arglist stuff arglist = trailer.children[1] - args = param.Arguments(evaluator, arglist, trailer) - lst = list(args.unpack()) + args = 
TreeArguments(context.evaluator, context, arglist, trailer) + param_list = list(args.unpack()) # Disallow keyword arguments - assert len(lst) == 2 and lst[0][0] is None and lst[1][0] is None - name = lst[0][1][0] # first argument, values, first value + assert len(param_list) == 2 + (key1, lazy_context_object), (key2, lazy_context_cls) = param_list + assert key1 is None and key2 is None + call = helpers.call_of_leaf(search_name) + is_instance_call = helpers.call_of_leaf(lazy_context_object.data) # Do a simple get_code comparison. They should just have the same code, # and everything will be all right. - classes = lst[1][1][0] - call = helpers.call_of_name(search_name) - assert name.get_code() == call.get_code() + normalize = context.evaluator.grammar._normalize + assert normalize(is_instance_call) == normalize(call) except AssertionError: - return [] - - result = [] - for typ in evaluator.eval_element(classes): - for typ in (typ.values() if isinstance(typ, iterable.Array) else [typ]): - result += evaluator.execute(typ) - return result - - -def global_names_dict_generator(evaluator, scope, position): - """ - For global name lookups. Yields tuples of (names_dict, position). If the - position is None, the position does not matter anymore in that scope. - - This function is used to include names from outer scopes. For example, when - the current scope is function: - - >>> from jedi._compatibility import u, no_unicode_pprint - >>> from jedi.parser import Parser, load_grammar - >>> parser = Parser(load_grammar(), u(''' - ... x = ['a', 'b', 'c'] - ... def func(): - ... y = None - ... ''')) - >>> scope = parser.module.subscopes[0] - >>> scope - - - `global_names_dict_generator` is a generator. First it yields names from - most inner scope. - - >>> from jedi.evaluate import Evaluator - >>> evaluator = Evaluator(load_grammar()) - >>> scope = evaluator.wrap(scope) - >>> pairs = list(global_names_dict_generator(evaluator, scope, (4, 0))) - >>> no_unicode_pprint(pairs[0]) - ({'func': [], 'y': []}, (4, 0)) - - Then it yields the names from one level "lower". In this example, this - is the most outer scope. As you can see, the position in the tuple is now - None, because typically the whole module is loaded before the function is - called. - - >>> no_unicode_pprint(pairs[1]) - ({'func': [], 'x': []}, None) - - After that we have a few underscore names that are part of the module. - - >>> sorted(pairs[2][0].keys()) - ['__doc__', '__file__', '__name__', '__package__'] - >>> pairs[3] # global names -> there are none in our example. - ({}, None) - >>> pairs[4] # package modules -> Also none. - ({}, None) - - Finally, it yields names from builtin, if `include_builtin` is - true (default). - - >>> pairs[5][0].values() #doctest: +ELLIPSIS - [[], ...] - """ - in_func = False - while scope is not None: - if not (scope.type == 'classdef' and in_func): - # Names in methods cannot be resolved within the class. - - for names_dict in scope.names_dicts(True): - yield names_dict, position - if scope.type == 'funcdef': - # The position should be reset if the current scope is a function. - in_func = True - position = None - scope = evaluator.wrap(scope.get_parent_scope()) - - # Add builtins to the global scope. - for names_dict in compiled.builtin.names_dicts(True): - yield names_dict, None - - -def check_tuple_assignments(types, name): - """ - Checks if tuples are assigned. 
- """ - for index in name.assignment_indexes(): - new_types = [] - for r in types: - try: - func = r.get_exact_index_types - except AttributeError: - debug.warning("Invalid tuple lookup #%s of result %s in %s", - index, types, name) - else: - try: - new_types += func(index) - except IndexError: - pass - types = new_types - return types - - -def filter_private_variable(scope, origin_node): - """Check if a variable is defined inside the same class or outside.""" - instance = scope.get_parent_scope() - coming_from = origin_node - while coming_from is not None \ - and not isinstance(coming_from, (tree.Class, compiled.CompiledObject)): - coming_from = coming_from.get_parent_scope() + return None - # CompiledObjects don't have double underscore attributes, but Jedi abuses - # those for fakes (builtins.pym -> list). - if isinstance(instance, compiled.CompiledObject): - return instance != coming_from - else: - return isinstance(instance, er.Instance) and instance.base.base != coming_from + context_set = ContextSet() + for cls_or_tup in lazy_context_cls.infer(): + if isinstance(cls_or_tup, iterable.AbstractIterable) and \ + cls_or_tup.array_type == 'tuple': + for lazy_context in cls_or_tup.py__iter__(): + for context in lazy_context.infer(): + context_set |= context.execute_evaluated() + else: + context_set |= cls_or_tup.execute_evaluated() + return context_set diff --git a/pythonFiles/release/jedi/evaluate/flow_analysis.py b/pythonFiles/release/jedi/evaluate/flow_analysis.py index cd3df554fa7e..670b7a71934c 100755 --- a/pythonFiles/release/jedi/evaluate/flow_analysis.py +++ b/pythonFiles/release/jedi/evaluate/flow_analysis.py @@ -1,4 +1,4 @@ -from jedi.parser import tree +from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope class Status(object): @@ -32,51 +32,79 @@ def __repr__(self): UNSURE = Status(None, 'unsure') -def break_check(evaluator, base_scope, stmt, origin_scope=None): - element_scope = evaluator.wrap(stmt.get_parent_scope(include_flows=True)) - # Direct parents get resolved, we filter scopes that are separate branches. - # This makes sense for autocompletion and static analysis. For actual - # Python it doesn't matter, because we're talking about potentially - # unreachable code. - # e.g. `if 0:` would cause all name lookup within the flow make - # unaccessible. This is not a "problem" in Python, because the code is - # never called. In Jedi though, we still want to infer types. 
- while origin_scope is not None: - if element_scope == origin_scope: - return REACHABLE - origin_scope = origin_scope.parent - return _break_check(evaluator, stmt, base_scope, element_scope) - - -def _break_check(evaluator, stmt, base_scope, element_scope): - element_scope = evaluator.wrap(element_scope) - base_scope = evaluator.wrap(base_scope) - +def _get_flow_scopes(node): + while True: + node = get_parent_scope(node, include_flows=True) + if node is None or is_scope(node): + return + yield node + + +def reachability_check(context, context_scope, node, origin_scope=None): + first_flow_scope = get_parent_scope(node, include_flows=True) + if origin_scope is not None: + origin_flow_scopes = list(_get_flow_scopes(origin_scope)) + node_flow_scopes = list(_get_flow_scopes(node)) + + branch_matches = True + for flow_scope in origin_flow_scopes: + if flow_scope in node_flow_scopes: + node_keyword = get_flow_branch_keyword(flow_scope, node) + origin_keyword = get_flow_branch_keyword(flow_scope, origin_scope) + branch_matches = node_keyword == origin_keyword + if flow_scope.type == 'if_stmt': + if not branch_matches: + return UNREACHABLE + elif flow_scope.type == 'try_stmt': + if not branch_matches and origin_keyword == 'else' \ + and node_keyword == 'except': + return UNREACHABLE + break + + # Direct parents get resolved, we filter scopes that are separate + # branches. This makes sense for autocompletion and static analysis. + # For actual Python it doesn't matter, because we're talking about + # potentially unreachable code. + # e.g. `if 0:` would cause all name lookup within the flow make + # unaccessible. This is not a "problem" in Python, because the code is + # never called. In Jedi though, we still want to infer types. + while origin_scope is not None: + if first_flow_scope == origin_scope and branch_matches: + return REACHABLE + origin_scope = origin_scope.parent + + return _break_check(context, context_scope, first_flow_scope, node) + + +def _break_check(context, context_scope, flow_scope, node): reachable = REACHABLE - if isinstance(element_scope, tree.IfStmt): - if element_scope.node_after_else(stmt): - for check_node in element_scope.check_nodes(): - reachable = _check_if(evaluator, check_node) + if flow_scope.type == 'if_stmt': + if flow_scope.is_node_after_else(node): + for check_node in flow_scope.get_test_nodes(): + reachable = _check_if(context, check_node) if reachable in (REACHABLE, UNSURE): break reachable = reachable.invert() else: - node = element_scope.node_in_which_check_node(stmt) - reachable = _check_if(evaluator, node) - elif isinstance(element_scope, (tree.TryStmt, tree.WhileStmt)): + flow_node = flow_scope.get_corresponding_test_node(node) + if flow_node is not None: + reachable = _check_if(context, flow_node) + elif flow_scope.type in ('try_stmt', 'while_stmt'): return UNSURE # Only reachable branches need to be examined further. 
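+    # (``try``/``while`` flows yield UNSURE above; their conditions are not
+    # analyzed any further.)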
if reachable in (UNREACHABLE, UNSURE): return reachable - if base_scope != element_scope and base_scope != element_scope.parent: - return reachable & _break_check(evaluator, stmt, base_scope, element_scope.parent) - return reachable + if context_scope != flow_scope and context_scope != flow_scope.parent: + flow_scope = get_parent_scope(flow_scope, include_flows=True) + return reachable & _break_check(context, context_scope, flow_scope, node) + else: + return reachable -def _check_if(evaluator, node): - types = evaluator.eval_element(node) +def _check_if(context, node): + types = context.eval_node(node) values = set(x.py__bool__() for x in types) if len(values) == 1: return Status.lookup_table[values.pop()] diff --git a/pythonFiles/release/jedi/evaluate/helpers.py b/pythonFiles/release/jedi/evaluate/helpers.py index 4802bee02afe..3b21e01bda9e 100755 --- a/pythonFiles/release/jedi/evaluate/helpers.py +++ b/pythonFiles/release/jedi/evaluate/helpers.py @@ -1,77 +1,51 @@ import copy +import sys +import re +import os from itertools import chain +from contextlib import contextmanager -from jedi.parser import tree +from parso.python import tree +from jedi._compatibility import unicode +from jedi.parser_utils import get_parent_scope +from jedi.evaluate.compiled import CompiledObject -def deep_ast_copy(obj, parent=None, new_elements=None): + +def is_stdlib_path(path): + # Python standard library paths look like this: + # /usr/lib/python3.5/... + # TODO The implementation below is probably incorrect and not complete. + if 'dist-packages' in path or 'site-packages' in path: + return False + + base_path = os.path.join(sys.prefix, 'lib', 'python') + return bool(re.match(re.escape(base_path) + '\d.\d', path)) + + +def deep_ast_copy(obj): """ - Much, much faster than copy.deepcopy, but just for Parser elements (Doesn't - copy parents). + Much, much faster than copy.deepcopy, but just for parser tree nodes. """ - - if new_elements is None: - new_elements = {} - - def copy_node(obj): - # If it's already in the cache, just return it. - try: - return new_elements[obj] - except KeyError: - # Actually copy and set attributes. - new_obj = copy.copy(obj) - new_elements[obj] = new_obj - - # Copy children - new_children = [] - for child in obj.children: - typ = child.type - if typ in ('whitespace', 'operator', 'keyword', 'number', 'string'): - # At the moment we're not actually copying those primitive - # elements, because there's really no need to. The parents are - # obviously wrong, but that's not an issue. - new_child = child - elif typ == 'name': - new_elements[child] = new_child = copy.copy(child) - new_child.parent = new_obj - else: # Is a BaseNode. - new_child = copy_node(child) - new_child.parent = new_obj - new_children.append(new_child) - new_obj.children = new_children - - # Copy the names_dict (if there is one). - try: - names_dict = obj.names_dict - except AttributeError: - pass + # If it's already in the cache, just return it. + new_obj = copy.copy(obj) + + # Copy children + new_children = [] + for child in obj.children: + if isinstance(child, tree.Leaf): + new_child = copy.copy(child) + new_child.parent = new_obj else: - try: - new_obj.names_dict = new_names_dict = {} - except AttributeError: # Impossible to set CompFor.names_dict - pass - else: - for string, names in names_dict.items(): - new_names_dict[string] = [new_elements[n] for n in names] - return new_obj - - if obj.type == 'name': - # Special case of a Name object. 
- new_elements[obj] = new_obj = copy.copy(obj) - if parent is not None: - new_obj.parent = parent - elif isinstance(obj, tree.BaseNode): - new_obj = copy_node(obj) - if parent is not None: - for child in new_obj.children: - if isinstance(child, (tree.Name, tree.BaseNode)): - child.parent = parent - else: # String literals and so on. - new_obj = obj # Good enough, don't need to copy anything. + new_child = deep_ast_copy(child) + new_child.parent = new_obj + new_children.append(new_child) + new_obj.children = new_children + return new_obj -def call_of_name(name, cut_own_trailer=False): +def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False): """ Creates a "call" node that consist of all ``trailer`` and ``power`` objects. E.g. if you call it with ``append``:: @@ -81,99 +55,147 @@ def call_of_name(name, cut_own_trailer=False): You would get a node with the content ``list([]).append`` back. This generates a copy of the original ast node. - """ - par = name - if tree.is_node(par.parent, 'trailer'): - par = par.parent - - power = par.parent - if tree.is_node(power, 'power') and power.children[0] != name \ - and not (power.children[-2] == '**' and - name.start_pos > power.children[-1].start_pos): - par = power - # Now the name must be part of a trailer - index = par.children.index(name.parent) - if index != len(par.children) - 1 or cut_own_trailer: - # Now we have to cut the other trailers away. - par = deep_ast_copy(par) - if not cut_own_trailer: - # Normally we would remove just the stuff after the index, but - # if the option is set remove the index as well. (for goto) - index = index + 1 - par.children[index:] = [] - - return par + If you're using the leaf, e.g. the bracket `)` it will return ``list([])``. -def get_module_names(module, all_scopes): + We use this function for two purposes. Given an expression ``bar.foo``, + we may want to + - infer the type of ``foo`` to offer completions after foo + - infer the type of ``bar`` to be able to jump to the definition of foo + The option ``cut_own_trailer`` must be set to true for the second purpose. """ - Returns a dictionary with name parts as keys and their call paths as - values. - """ - if all_scopes: - dct = module.used_names + trailer = leaf.parent + # The leaf may not be the last or first child, because there exist three + # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples + # we should not match anything more than x. 
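+    # e.g. for ``x`` in ``foo[x]`` only ``x`` itself is evaluated here,
+    # whereas the enclosing brackets belong to the call around it.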
+ if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]): + if trailer.type == 'atom': + return context.eval_node(trailer) + return context.eval_node(leaf) + + power = trailer.parent + index = power.children.index(trailer) + if cut_own_trailer: + cut = index + else: + cut = index + 1 + + if power.type == 'error_node': + start = index + while True: + start -= 1 + base = power.children[start] + if base.type != 'trailer': + break + trailers = power.children[start + 1: index + 1] else: - dct = module.names_dict - return chain.from_iterable(dct.values()) + base = power.children[0] + trailers = power.children[1:cut] + if base == 'await': + base = trailers[0] + trailers = trailers[1:] -class FakeImport(tree.ImportName): - def __init__(self, name, parent, level=0): - super(FakeImport, self).__init__([]) - self.parent = parent - self._level = level - self.name = name + values = context.eval_node(base) + from jedi.evaluate.syntax_tree import eval_trailer + for trailer in trailers: + values = eval_trailer(context, values, trailer) + return values - def get_defined_names(self): - return [self.name] - def aliases(self): - return {} +def call_of_leaf(leaf): + """ + Creates a "call" node that consist of all ``trailer`` and ``power`` + objects. E.g. if you call it with ``append``:: - @property - def level(self): - return self._level + list([]).append(3) or None - @property - def start_pos(self): - return 0, 0 + You would get a node with the content ``list([]).append`` back. - def paths(self): - return [[self.name]] + This generates a copy of the original ast node. - def is_definition(self): - return True + If you're using the leaf, e.g. the bracket `)` it will return ``list([])``. + """ + # TODO this is the old version of this call. Try to remove it. + trailer = leaf.parent + # The leaf may not be the last or first child, because there exist three + # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples + # we should not match anything more than x. + if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]): + if trailer.type == 'atom': + return trailer + return leaf + + power = trailer.parent + index = power.children.index(trailer) + + new_power = copy.copy(power) + new_power.children = list(new_power.children) + new_power.children[index + 1:] = [] + + if power.type == 'error_node': + start = index + while True: + start -= 1 + if power.children[start].type != 'trailer': + break + transformed = tree.Node('power', power.children[start:]) + transformed.parent = power.parent + return transformed + + return power + + +def get_names_of_node(node): + try: + children = node.children + except AttributeError: + if node.type == 'name': + return [node] + else: + return [] + else: + return list(chain.from_iterable(get_names_of_node(c) for c in children)) -class FakeName(tree.Name): - def __init__(self, name_str, parent=None, start_pos=(0, 0), is_definition=None): - """ - In case is_definition is defined (not None), that bool value will be - returned. - """ - super(FakeName, self).__init__(tree.zero_position_modifier, name_str, start_pos) - self.parent = parent - self._is_definition = is_definition +def get_module_names(module, all_scopes): + """ + Returns a dictionary with name parts as keys and their call paths as + values. + """ + names = chain.from_iterable(module.get_used_names().values()) + if not all_scopes: + # We have to filter all the names that don't have the module as a + # parent_scope. 
There's None as a parent, because nodes in the module + # node have the parent module and not suite as all the others. + # Therefore it's important to catch that case. + names = [n for n in names if get_parent_scope(n).parent in (module, None)] + return names - def get_definition(self): - return self.parent - def is_definition(self): - if self._is_definition is None: - return super(FakeName, self).is_definition() - else: - return self._is_definition +@contextmanager +def predefine_names(context, flow_scope, dct): + predefined = context.predefined_names + if flow_scope in predefined: + raise NotImplementedError('Why does this happen?') + predefined[flow_scope] = dct + try: + yield + finally: + del predefined[flow_scope] + + +def is_compiled(context): + return isinstance(context, CompiledObject) + + +def is_string(context): + return is_compiled(context) and isinstance(context.obj, (str, unicode)) -class LazyName(FakeName): - def __init__(self, name, parent_callback, is_definition=None): - super(LazyName, self).__init__(name, is_definition=is_definition) - self._parent_callback = parent_callback +def is_literal(context): + return is_number(context) or is_string(context) - @property - def parent(self): - return self._parent_callback() - @parent.setter - def parent(self, value): - pass # Do nothing, super classes can try to set the parent. +def is_number(context): + return is_compiled(context) and isinstance(context.obj, (int, float)) diff --git a/pythonFiles/release/jedi/evaluate/imports.py b/pythonFiles/release/jedi/evaluate/imports.py index c14a50c0dce0..ecf656b1a676 100755 --- a/pythonFiles/release/jedi/evaluate/imports.py +++ b/pythonFiles/release/jedi/evaluate/imports.py @@ -15,110 +15,85 @@ import os import pkgutil import sys -from itertools import chain -from jedi._compatibility import find_module, unicode -from jedi import common +from parso.python import tree +from parso.tree import search_ancestor +from parso.cache import parser_cache +from parso import python_bytes_to_unicode + +from jedi._compatibility import find_module, unicode, ImplicitNSInfo from jedi import debug -from jedi import cache -from jedi.parser import fast -from jedi.parser import tree +from jedi import settings from jedi.evaluate import sys_path from jedi.evaluate import helpers -from jedi import settings -from jedi.common import source_to_unicode from jedi.evaluate import compiled from jedi.evaluate import analysis -from jedi.evaluate.cache import memoize_default, NO_DEFAULT - - -def completion_names(evaluator, imp, pos): - name = imp.name_for_position(pos) - module = evaluator.wrap(imp.get_parent_until()) - if name is None: - level = 0 - for node in imp.children: - if node.end_pos <= pos: - if node in ('.', '...'): - level += len(node.value) - import_path = [] +from jedi.evaluate.utils import unite +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.filters import AbstractNameDefinition +from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS + + +# This memoization is needed, because otherwise we will infinitely loop on +# certain imports. 
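+# (Think of a module that, directly or through a cycle, imports itself:
+# without the cache the second lookup would restart the whole inference.)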
+@evaluator_method_cache(default=NO_CONTEXTS) +def infer_import(context, tree_name, is_goto=False): + module_context = context.get_root_context() + import_node = search_ancestor(tree_name, 'import_name', 'import_from') + import_path = import_node.get_path_for_name(tree_name) + from_import_name = None + evaluator = context.evaluator + try: + from_names = import_node.get_from_names() + except AttributeError: + # Is an import_name + pass else: - # Completion on an existing name. - - # The import path needs to be reduced by one, because we're completing. - import_path = imp.path_for_name(name)[:-1] - level = imp.level - - importer = Importer(evaluator, tuple(import_path), module, level) - if isinstance(imp, tree.ImportFrom): - c = imp.children - only_modules = c[c.index('import')].start_pos >= pos - else: - only_modules = True - return importer.completion_names(evaluator, only_modules) - - -class ImportWrapper(tree.Base): - def __init__(self, evaluator, name): - self._evaluator = evaluator - self._name = name - - self._import = name.get_parent_until(tree.Import) - self.import_path = self._import.path_for_name(name) - - @memoize_default() - def follow(self, is_goto=False): - if self._evaluator.recursion_detector.push_stmt(self._import): - # check recursion - return [] - - try: - module = self._evaluator.wrap(self._import.get_parent_until()) - import_path = self._import.path_for_name(self._name) - from_import_name = None - try: - from_names = self._import.get_from_names() - except AttributeError: - # Is an import_name - pass - else: - if len(from_names) + 1 == len(import_path): - # We have to fetch the from_names part first and then check - # if from_names exists in the modules. - from_import_name = import_path[-1] - import_path = from_names - - importer = Importer(self._evaluator, tuple(import_path), - module, self._import.level) - + if len(from_names) + 1 == len(import_path): + # We have to fetch the from_names part first and then check + # if from_names exists in the modules. 
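+            # e.g. for ``from os import path`` we import ``os`` first and
+            # only then look ``path`` up on the resulting module contexts.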
+ from_import_name = import_path[-1] + import_path = from_names + + importer = Importer(evaluator, tuple(import_path), + module_context, import_node.level) + + types = importer.follow() + + #if import_node.is_nested() and not self.nested_resolve: + # scopes = [NestedImportModule(module, import_node)] + + if not types: + return NO_CONTEXTS + + if from_import_name is not None: + types = unite( + t.py__getattribute__( + from_import_name, + name_context=context, + is_goto=is_goto, + analysis_errors=False + ) + for t in types + ) + if not is_goto: + types = ContextSet.from_set(types) + + if not types: + path = import_path + [from_import_name] + importer = Importer(evaluator, tuple(path), + module_context, import_node.level) types = importer.follow() + # goto only accepts `Name` + if is_goto: + types = set(s.name for s in types) + else: + # goto only accepts `Name` + if is_goto: + types = set(s.name for s in types) - #if self._import.is_nested() and not self.nested_resolve: - # scopes = [NestedImportModule(module, self._import)] - - if from_import_name is not None: - types = list(chain.from_iterable( - self._evaluator.find_types(t, unicode(from_import_name), - is_goto=is_goto) - for t in types)) - - if not types: - path = import_path + [from_import_name] - importer = Importer(self._evaluator, tuple(path), - module, self._import.level) - types = importer.follow() - # goto only accepts `Name` - if is_goto: - types = [s.name for s in types] - else: - # goto only accepts `Name` - if is_goto: - types = [s.name for s in types] - - debug.dbg('after import: %s', types) - finally: - self._evaluator.recursion_detector.pop_stmt() - return types + debug.dbg('after import: %s', types) + return types class NestedImportModule(tree.Module): @@ -153,10 +128,10 @@ def __repr__(self): self._nested_import) -def _add_error(evaluator, name, message=None): +def _add_error(context, name, message=None): + # Should be a name, not a string! if hasattr(name, 'parent'): - # Should be a name, not a string! - analysis.add(evaluator, 'import-error', name, message) + analysis.add(context, 'import-error', name, message) def get_init_path(directory_path): @@ -171,8 +146,40 @@ def get_init_path(directory_path): return None +class ImportName(AbstractNameDefinition): + start_pos = (1, 0) + _level = 0 + + def __init__(self, parent_context, string_name): + self.parent_context = parent_context + self.string_name = string_name + + def infer(self): + return Importer( + self.parent_context.evaluator, + [self.string_name], + self.parent_context, + level=self._level, + ).follow() + + def goto(self): + return [m.name for m in self.infer()] + + def get_root_context(self): + # Not sure if this is correct. + return self.parent_context.get_root_context() + + @property + def api_type(self): + return 'module' + + +class SubModuleName(ImportName): + _level = 1 + + class Importer(object): - def __init__(self, evaluator, import_path, module, level=0): + def __init__(self, evaluator, import_path, module_context, level=0): """ An implementation similar to ``__import__``. Use `follow` to actually follow the imports. @@ -188,47 +195,65 @@ def __init__(self, evaluator, import_path, module, level=0): debug.speed('import %s' % (import_path,)) self._evaluator = evaluator self.level = level - self.module = module + self.module_context = module_context try: - self.file_path = module.py__file__() + self.file_path = module_context.py__file__() except AttributeError: # Can be None for certain compiled modules like 'builtins'. 
self.file_path = None if level: - base = module.py__package__().split('.') + base = module_context.py__package__().split('.') if base == ['']: base = [] if level > len(base): - path = module.py__file__() - import_path = list(import_path) - for i in range(level): - path = os.path.dirname(path) - dir_name = os.path.basename(path) - # This is not the proper way to do relative imports. However, since - # Jedi cannot be sure about the entry point, we just calculate an - # absolute path here. - if dir_name: - import_path.insert(0, dir_name) - else: - _add_error(self._evaluator, import_path[-1]) - import_path = [] - # TODO add import error. - debug.warning('Attempted relative import beyond top-level package.') + path = module_context.py__file__() + if path is not None: + import_path = list(import_path) + p = path + for i in range(level): + p = os.path.dirname(p) + dir_name = os.path.basename(p) + # This is not the proper way to do relative imports. However, since + # Jedi cannot be sure about the entry point, we just calculate an + # absolute path here. + if dir_name: + # TODO those sys.modules modifications are getting + # really stupid. This is the 3rd time that we're using + # this. We should probably refactor. + if path.endswith(os.path.sep + 'os.py'): + import_path.insert(0, 'os') + else: + import_path.insert(0, dir_name) + else: + _add_error(module_context, import_path[-1]) + import_path = [] + # TODO add import error. + debug.warning('Attempted relative import beyond top-level package.') + # If no path is defined in the module we have no idea where we + # are in the file system. Therefore we cannot know what to do. + # In this case we just leave the path there and ignore that it's + # a relative path. Not sure if that's a good idea. else: # Here we basically rewrite the level to 0. - import_path = tuple(base) + import_path + base = tuple(base) + if level > 1: + base = base[:-level + 1] + + import_path = base + tuple(import_path) self.import_path = import_path @property def str_import_path(self): """Returns the import path as pure strings instead of `Name`.""" - return tuple(str(name) for name in self.import_path) + return tuple( + name.value if isinstance(name, tree.Name) else name + for name in self.import_path) - @memoize_default() def sys_path_with_modifications(self): in_path = [] - sys_path_mod = list(sys_path.sys_path_with_modifications(self._evaluator, self.module)) + sys_path_mod = self._evaluator.project.sys_path \ + sys_path.check_sys_path_modifications(self.module_context) if self.file_path is not None: # If you edit e.g. gunicorn, there will be imports like this: # `from gunicorn import something`. But gunicorn is not in the @@ -245,17 +270,19 @@ def sys_path_with_modifications(self): return in_path + sys_path_mod - @memoize_default(NO_DEFAULT) def follow(self): if not self.import_path: - return [] + return NO_CONTEXTS return self._do_import(self.import_path, self.sys_path_with_modifications()) def _do_import(self, import_path, sys_path): """ This method is very similar to importlib's `_gcd_import`. """ - import_parts = [str(i) for i in import_path] + import_parts = [ + i.value if isinstance(i, tree.Name) else i + for i in import_path + ] # Handle "magic" Flask extension imports: # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
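# A minimal, self-contained sketch (not from the patch) of the
# directory-walking fallback the hunk above uses when a relative import's
# level exceeds the known package depth; the helper name `resolve_relative`
# is hypothetical.
import os

def resolve_relative(module_file, import_path, level):
    # `level` is the number of leading dots in the import
    # (1 for `from . import x`, 2 for `from .. import x`, and so on).
    import_path = list(import_path)
    p = module_file
    for _ in range(level):
        p = os.path.dirname(p)
        dir_name = os.path.basename(p)
        if not dir_name:
            # Walked past the filesystem root: the relative import
            # reaches beyond the top-level package.
            return []
        # Prepend the directory name to approximate an absolute import path.
        import_path.insert(0, dir_name)
    return import_path

# For `from . import bar` inside /proj/pkg/foo.py:
# resolve_relative('/proj/pkg/foo.py', ['bar'], 1) == ['pkg', 'bar']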
@@ -271,7 +298,7 @@ def _do_import(self, import_path, sys_path): module_name = '.'.join(import_parts) try: - return [self._evaluator.modules[module_name]] + return ContextSet(self._evaluator.modules[module_name]) except KeyError: pass @@ -280,43 +307,44 @@ # the module cache. bases = self._do_import(import_path[:-1], sys_path) if not bases: - return [] + return NO_CONTEXTS # We can take the first element, because only the os special # case yields multiple modules, which is not important for # further imports. - base = bases[0] + parent_module = list(bases)[0] # This is a huge exception, we follow a nested import # ``os.path``, because it's a very important one in Python # that is being achieved by messing with ``sys.modules`` in # ``os``. - if [str(i) for i in import_path] == ['os', 'path']: - return self._evaluator.find_types(base, 'path') + if import_parts == ['os', 'path']: + return parent_module.py__getattribute__('path') try: - # It's possible that by giving it always the sys path (and not - # the __path__ attribute of the parent, we get wrong results - # and nested namespace packages don't work. But I'm not sure. - paths = base.py__path__(sys_path) + method = parent_module.py__path__ except AttributeError: # The module is not a package. - _add_error(self._evaluator, import_path[-1]) - return [] + _add_error(self.module_context, import_path[-1]) + return NO_CONTEXTS else: + paths = method() debug.dbg('search_module %s in paths %s', module_name, paths) for path in paths: # At the moment we are only using one path. So this is # not important to be correct. try: + if not isinstance(path, list): + path = [path] module_file, module_path, is_pkg = \ - find_module(import_parts[-1], [path]) + find_module(import_parts[-1], path, fullname=module_name) break except ImportError: module_path = None if module_path is None: - _add_error(self._evaluator, import_path[-1]) - return [] + _add_error(self.module_context, import_path[-1]) + return NO_CONTEXTS else: + parent_module = None try: debug.dbg('search_module %s in %s', import_parts[-1], self.file_path) # Override the sys.path. It only works well that way. @@ -324,35 +352,51 @@ sys.path, temp = sys_path, sys.path try: module_file, module_path, is_pkg = \ - find_module(import_parts[-1]) + find_module(import_parts[-1], fullname=module_name) finally: sys.path = temp except ImportError: # The module is not a package. - _add_error(self._evaluator, import_path[-1]) - return [] + _add_error(self.module_context, import_path[-1]) + return NO_CONTEXTS - source = None + code = None if is_pkg: # In this case, we don't have a file yet. Search for the # __init__ file.
- module_path = get_init_path(module_path) + if module_path.endswith(('.zip', '.egg')): + code = module_file.loader.get_source(module_name) + else: + module_path = get_init_path(module_path) elif module_file: - source = module_file.read() + code = module_file.read() module_file.close() - if module_file is None and not module_path.endswith('.py'): - module = compiled.load_module(module_path) + if isinstance(module_path, ImplicitNSInfo): + from jedi.evaluate.context.namespace import ImplicitNamespaceContext + fullname, paths = module_path.name, module_path.paths + module = ImplicitNamespaceContext(self._evaluator, fullname=fullname) + module.paths = paths + elif module_file is None and not module_path.endswith(('.py', '.zip', '.egg')): + module = compiled.load_module(self._evaluator, module_path) else: - module = _load_module(self._evaluator, module_path, source, sys_path) + module = _load_module(self._evaluator, module_path, code, sys_path, parent_module) + + if module is None: + # The file might, for example, raise an ImportError and + # therefore not be importable. + return NO_CONTEXTS self._evaluator.modules[module_name] = module - return [module] + return ContextSet(module) - def _generate_name(self, name): - return helpers.FakeName(name, parent=self.module) + def _generate_name(self, name, in_module=None): + # Create a pseudo import name to be able to follow it. + if in_module is None: + return ImportName(self.module_context, name) + return SubModuleName(in_module, name) - def _get_module_names(self, search_path=None): + def _get_module_names(self, search_path=None, in_module=None): """ Get the names of all modules in the search_path. This means file names and not names defined in the files. @@ -360,13 +404,13 @@ names = [] # add builtin module names - if search_path is None: + if search_path is None and in_module is None: names += [self._generate_name(name) for name in sys.builtin_module_names] if search_path is None: search_path = self.sys_path_with_modifications() for module_loader, name, is_pkg in pkgutil.iter_modules(search_path): - names.append(self._generate_name(name)) + names.append(self._generate_name(name, in_module=in_module)) return names def completion_names(self, evaluator, only_modules=False): @@ -374,14 +418,15 @@ :param only_modules: Indicates whether it's possible to import a definition that is not defined in a module. """ - from jedi.evaluate import finder + from jedi.evaluate.context import ModuleContext + from jedi.evaluate.context.namespace import ImplicitNamespaceContext names = [] if self.import_path: # flask if self.str_import_path == ('flask', 'ext'): # List Flask extensions like ``flask_foo`` for mod in self._get_module_names(): - modname = str(mod) + modname = mod.string_name if modname.startswith('flask_'): extname = modname[len('flask_'):] names.append(self._generate_name(extname)) @@ -391,14 +436,18 @@ if os.path.isdir(flaskext): names += self._get_module_names([flaskext]) - for scope in self.follow(): + for context in self.follow(): # Non-modules are not completable.
- if not scope.type == 'file_input': # not a module + if context.api_type != 'module': # not a module continue - # namespace packages - if isinstance(scope, tree.Module) and scope.path.endswith('__init__.py'): - paths = scope.py__path__(self.sys_path_with_modifications()) + if isinstance(context, ModuleContext) and context.py__file__().endswith('__init__.py'): + paths = context.py__path__() + names += self._get_module_names(paths, in_module=context) + + # implicit namespace packages + elif isinstance(context, ImplicitNamespaceContext): + paths = context.paths names += self._get_module_names(paths) if only_modules: @@ -407,16 +456,12 @@ if ('os',) == self.str_import_path and not self.level: # os.path is a hardcoded exception, because it's a # ``sys.modules`` modification. - names.append(self._generate_name('path')) + names.append(self._generate_name('path', context)) continue - for names_dict in scope.names_dicts(search_global=False): - _names = list(chain.from_iterable(names_dict.values())) - if not _names: - continue - _names = finder.filter_definition_names(_names, scope) - names += _names + for filter in context.get_filters(search_global=False): + names += filter.values() else: # Empty import path=completion after import if not self.level: @@ -431,25 +476,22 @@ return names -def _load_module(evaluator, path=None, source=None, sys_path=None): - def load(source): - dotted_path = path and compiled.dotted_from_fs_path(path, sys_path) - if path is not None and path.endswith('.py') \ - and not dotted_path in settings.auto_import_modules: - if source is None: - with open(path, 'rb') as f: - source = f.read() - else: - return compiled.load_module(path) - p = path - p = fast.FastParser(evaluator.grammar, common.source_to_unicode(source), p) - cache.save_parser(path, p) - return p.module +def _load_module(evaluator, path=None, code=None, sys_path=None, parent_module=None): + if sys_path is None: + sys_path = evaluator.project.sys_path + + dotted_path = path and compiled.dotted_from_fs_path(path, sys_path) + if path is not None and path.endswith(('.py', '.zip', '.egg')) \ + and dotted_path not in settings.auto_import_modules: - cached = cache.load_parser(path) - module = load(source) if cached is None else cached.module - module = evaluator.wrap(module) - return module + module_node = evaluator.grammar.parse( + code=code, path=path, cache=True, diff_cache=True, + cache_path=settings.cache_directory) + + from jedi.evaluate.context import ModuleContext + return ModuleContext(evaluator, module_node, path=path) + else: + return compiled.load_module(evaluator, path) def add_module(evaluator, module_name, module): @@ -461,47 +503,68 @@ evaluator.modules[module_name] = module -def get_modules_containing_name(evaluator, mods, name): +def get_modules_containing_name(evaluator, modules, name): """ Search for a name in the directories of modules. """ + from jedi.evaluate.context import ModuleContext + def check_directories(paths): + for p in paths: + if p is not None: + # We need abspath, because the settings paths might not already + # have been converted to absolute paths.
+ d = os.path.dirname(os.path.abspath(p)) + for file_name in os.listdir(d): + path = os.path.join(d, file_name) + if file_name.endswith('.py'): + yield path + def check_python_file(path): try: - return cache.parser_cache[path].parser.module + # TODO I don't think we should use the cache here?! + node_cache_item = parser_cache[evaluator.grammar._hashed][path] except KeyError: try: return check_fs(path) except IOError: return None + else: + module_node = node_cache_item.node + return ModuleContext(evaluator, module_node, path=path) def check_fs(path): with open(path, 'rb') as f: - source = source_to_unicode(f.read()) - if name in source: - module_name = os.path.basename(path)[:-3] # Remove `.py`. - module = _load_module(evaluator, path, source) - add_module(evaluator, module_name, module) + code = python_bytes_to_unicode(f.read(), errors='replace') + if name in code: + module = _load_module(evaluator, path, code) + + module_name = sys_path.dotted_path_in_sys_path(evaluator.project.sys_path, path) + if module_name is not None: + add_module(evaluator, module_name, module) return module # skip non python modules - mods = set(m for m in mods if not isinstance(m, compiled.CompiledObject)) - mod_paths = set() - for m in mods: - mod_paths.add(m.path) + used_mod_paths = set() + for m in modules: + try: + path = m.py__file__() + except AttributeError: + pass + else: + used_mod_paths.add(path) yield m - if settings.dynamic_params_for_other_modules: - paths = set(settings.additional_dynamic_modules) - for p in mod_paths: - if p is not None: - d = os.path.dirname(p) - for entry in os.listdir(d): - if entry not in mod_paths: - if entry.endswith('.py'): - paths.add(d + os.path.sep + entry) - - for p in sorted(paths): - # make testing easier, sort it - same results on every interpreter - c = check_python_file(p) - if c is not None and c not in mods and not isinstance(c, compiled.CompiledObject): - yield c + if not settings.dynamic_params_for_other_modules: + return + + additional = set(os.path.abspath(p) for p in settings.additional_dynamic_modules) + # Check the directories of used modules. + paths = (additional | set(check_directories(used_mod_paths))) \ + - used_mod_paths + + # Sort here to make issues less random. + for p in sorted(paths): + # make testing easier, sort it - same results on every interpreter + m = check_python_file(p) + if m is not None and not isinstance(m, compiled.CompiledObject): + yield m diff --git a/pythonFiles/release/jedi/evaluate/iterable.py b/pythonFiles/release/jedi/evaluate/iterable.py deleted file mode 100755 index d14a4c5fd2fb..000000000000 --- a/pythonFiles/release/jedi/evaluate/iterable.py +++ /dev/null @@ -1,631 +0,0 @@ -""" -Contains all classes and functions to deal with lists, dicts, generators and -iterators in general. - -Array modifications -******************* - -If the content of an array (``set``/``list``) is requested somewhere, the -current module will be checked for appearances of ``arr.append``, -``arr.insert``, etc. If the ``arr`` name points to an actual array, the -content will be added - -This can be really cpu intensive, as you can imagine. Because |jedi| has to -follow **every** ``append`` and check wheter it's the right array. However this -works pretty good, because in *slow* cases, the recursion detector and other -settings will stop this process. - -It is important to note that: - -1. Array modfications work only in the current module. -2. Jedi only checks Array additions; ``list.pop``, etc are ignored. 
-""" -from itertools import chain - -from jedi import common -from jedi import debug -from jedi import settings -from jedi._compatibility import use_metaclass, is_py3, unicode -from jedi.parser import tree -from jedi.evaluate import compiled -from jedi.evaluate import helpers -from jedi.evaluate.cache import CachedMetaClass, memoize_default -from jedi.evaluate import analysis - - -def unite(iterable): - """Turns a two dimensional array into a one dimensional.""" - return list(chain.from_iterable(iterable)) - - -class IterableWrapper(tree.Base): - def is_class(self): - return False - - -class GeneratorMixin(object): - @memoize_default() - def names_dicts(self, search_global=False): # is always False - dct = {} - executes_generator = '__next__', 'send', 'next' - for names in compiled.generator_obj.names_dict.values(): - for name in names: - if name.value in executes_generator: - parent = GeneratorMethod(self, name.parent) - dct[name.value] = [helpers.FakeName(name.name, parent, is_definition=True)] - else: - dct[name.value] = [name] - yield dct - - def get_index_types(self, evaluator, index_array): - #debug.warning('Tried to get array access on a generator: %s', self) - analysis.add(self._evaluator, 'type-error-generator', index_array) - return [] - - def get_exact_index_types(self, index): - """ - Exact lookups are used for tuple lookups, which are perfectly fine if - used with generators. - """ - return [self.iter_content()[index]] - - def py__bool__(self): - return True - - -class Generator(use_metaclass(CachedMetaClass, IterableWrapper, GeneratorMixin)): - """Handling of `yield` functions.""" - def __init__(self, evaluator, func, var_args): - super(Generator, self).__init__() - self._evaluator = evaluator - self.func = func - self.var_args = var_args - - def iter_content(self): - """ returns the content of __iter__ """ - # Directly execute it, because with a normal call to py__call__ a - # Generator will be returned. - from jedi.evaluate.representation import FunctionExecution - f = FunctionExecution(self._evaluator, self.func, self.var_args) - return f.get_return_types(check_yields=True) - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'parent', 'get_imports', - 'doc', 'docstr', 'get_parent_until', - 'get_code', 'subscopes']: - raise AttributeError("Accessing %s of %s is not allowed." - % (self, name)) - return getattr(self.func, name) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.func) - - -class GeneratorMethod(IterableWrapper): - """``__next__`` and ``send`` methods.""" - def __init__(self, generator, builtin_func): - self._builtin_func = builtin_func - self._generator = generator - - def py__call__(self, evaluator, params): - # TODO add TypeError if params are given. - return self._generator.iter_content() - - def __getattr__(self, name): - return getattr(self._builtin_func, name) - - -class Comprehension(IterableWrapper): - @staticmethod - def from_atom(evaluator, atom): - mapping = { - '(': GeneratorComprehension, - '[': ListComprehension - } - return mapping[atom.children[0]](evaluator, atom) - - def __init__(self, evaluator, atom): - self._evaluator = evaluator - self._atom = atom - - @memoize_default() - def eval_node(self): - """ - The first part `x + 1` of the list comprehension: - - [x + 1 for x in foo] - """ - comprehension = self._atom.children[1] - # For nested comprehensions we need to search the last one. 
- last = comprehension.children[-1] - last_comp = comprehension.children[1] - while True: - if isinstance(last, tree.CompFor): - last_comp = last - elif not tree.is_node(last, 'comp_if'): - break - last = last.children[-1] - - return helpers.deep_ast_copy(comprehension.children[0], parent=last_comp) - - def get_exact_index_types(self, index): - return [self._evaluator.eval_element(self.eval_node())[index]] - - def __repr__(self): - return "" % (type(self).__name__, self._atom) - - -class ArrayMixin(object): - @memoize_default() - def names_dicts(self, search_global=False): # Always False. - # `array.type` is a string with the type, e.g. 'list'. - scope = self._evaluator.find_types(compiled.builtin, self.type)[0] - # builtins only have one class -> [0] - scope = self._evaluator.execute(scope, (AlreadyEvaluated((self,)),))[0] - return scope.names_dicts(search_global) - - def py__bool__(self): - return None # We don't know the length, because of appends. - - -class ListComprehension(Comprehension, ArrayMixin): - type = 'list' - - def get_index_types(self, evaluator, index): - return self.iter_content() - - def iter_content(self): - return self._evaluator.eval_element(self.eval_node()) - - @property - def name(self): - return FakeSequence(self._evaluator, [], 'list').name - - -class GeneratorComprehension(Comprehension, GeneratorMixin): - def iter_content(self): - return self._evaluator.eval_element(self.eval_node()) - - -class Array(IterableWrapper, ArrayMixin): - mapping = {'(': 'tuple', - '[': 'list', - '{': 'dict'} - - def __init__(self, evaluator, atom): - self._evaluator = evaluator - self.atom = atom - self.type = Array.mapping[atom.children[0]] - """The builtin name of the array (list, set, tuple or dict).""" - - c = self.atom.children - array_node = c[1] - if self.type == 'dict' and array_node != '}' \ - and (not hasattr(array_node, 'children') - or ':' not in array_node.children): - self.type = 'set' - - @property - def name(self): - return helpers.FakeName(self.type, parent=self) - - @memoize_default() - def get_index_types(self, evaluator, index=()): - """ - Get the types of a specific index or all, if not given. - - :param index: A subscriptlist node (or subnode). - """ - indexes = create_indexes_or_slices(evaluator, index) - lookup_done = False - types = [] - for index in indexes: - if isinstance(index, Slice): - types += [self] - lookup_done = True - elif isinstance(index, compiled.CompiledObject) \ - and isinstance(index.obj, (int, str, unicode)): - with common.ignored(KeyError, IndexError, TypeError): - types += self.get_exact_index_types(index.obj) - lookup_done = True - - return types if lookup_done else self.values() - - @memoize_default() - def values(self): - result = unite(self._evaluator.eval_element(v) for v in self._values()) - result += check_array_additions(self._evaluator, self) - return result - - def get_exact_index_types(self, mixed_index): - """ Here the index is an int/str. Raises IndexError/KeyError """ - if self.type == 'dict': - for key, values in self._items(): - # Because we only want the key to be a string. - keys = self._evaluator.eval_element(key) - - for k in keys: - if isinstance(k, compiled.CompiledObject) \ - and mixed_index == k.obj: - for value in values: - return self._evaluator.eval_element(value) - raise KeyError('No key found in dictionary %s.' 
% self) - - # Can raise an IndexError - return self._evaluator.eval_element(self._items()[mixed_index]) - - def iter_content(self): - return self.values() - - @common.safe_property - def parent(self): - return compiled.builtin - - def get_parent_until(self): - return compiled.builtin - - def __getattr__(self, name): - if name not in ['start_pos', 'get_only_subelement', 'parent', - 'get_parent_until', 'items']: - raise AttributeError('Strange access on %s: %s.' % (self, name)) - return getattr(self.atom, name) - - def _values(self): - """Returns a list of a list of node.""" - if self.type == 'dict': - return list(chain.from_iterable(v for k, v in self._items())) - else: - return self._items() - - def _items(self): - c = self.atom.children - array_node = c[1] - if array_node in (']', '}', ')'): - return [] # Direct closing bracket, doesn't contain items. - - if tree.is_node(array_node, 'testlist_comp'): - return array_node.children[::2] - elif tree.is_node(array_node, 'dictorsetmaker'): - kv = [] - iterator = iter(array_node.children) - for key in iterator: - op = next(iterator, None) - if op is None or op == ',': - kv.append(key) # A set. - elif op == ':': # A dict. - kv.append((key, [next(iterator)])) - next(iterator, None) # Possible comma. - else: - raise NotImplementedError('dict/set comprehensions') - return kv - else: - return [array_node] - - def __iter__(self): - return iter(self._items()) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.atom) - - -class _FakeArray(Array): - def __init__(self, evaluator, container, type): - self.type = type - self._evaluator = evaluator - self.atom = container - - -class ImplicitTuple(_FakeArray): - def __init__(self, evaluator, testlist): - super(ImplicitTuple, self).__init__(evaluator, testlist, 'tuple') - self._testlist = testlist - - def _items(self): - return self._testlist.children[::2] - - -class FakeSequence(_FakeArray): - def __init__(self, evaluator, sequence_values, type): - super(FakeSequence, self).__init__(evaluator, sequence_values, type) - self._sequence_values = sequence_values - - def _items(self): - return self._sequence_values - - def get_exact_index_types(self, index): - value = self._sequence_values[index] - return self._evaluator.eval_element(value) - - -class AlreadyEvaluated(frozenset): - """A simple container to add already evaluated objects to an array.""" - def get_code(self): - # For debugging purposes. - return str(self) - - -class MergedNodes(frozenset): - pass - - -class FakeDict(_FakeArray): - def __init__(self, evaluator, dct): - super(FakeDict, self).__init__(evaluator, dct, 'dict') - self._dct = dct - - def get_exact_index_types(self, index): - return list(chain.from_iterable(self._evaluator.eval_element(v) - for v in self._dct[index])) - - def _items(self): - return self._dct.items() - - -class MergedArray(_FakeArray): - def __init__(self, evaluator, arrays): - super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].type) - self._arrays = arrays - - def get_exact_index_types(self, mixed_index): - raise IndexError - - def values(self): - return list(chain(*(a.values() for a in self._arrays))) - - def __iter__(self): - for array in self._arrays: - for a in array: - yield a - - def __len__(self): - return sum(len(a) for a in self._arrays) - - -def get_iterator_types(inputs): - """Returns the types of any iterator (arrays, yields, __iter__, etc).""" - iterators = [] - # Take the first statement (for has always only - # one, remember `in`). And follow it. 
- for it in inputs: - if isinstance(it, (Generator, Array, ArrayInstance, Comprehension)): - iterators.append(it) - else: - if not hasattr(it, 'execute_subscope_by_name'): - debug.warning('iterator/for loop input wrong: %s', it) - continue - try: - iterators += it.execute_subscope_by_name('__iter__') - except KeyError: - debug.warning('iterators: No __iter__ method found.') - - result = [] - from jedi.evaluate.representation import Instance - for it in iterators: - if isinstance(it, Array): - # Array is a little bit special, since this is an internal array, - # but there's also the list builtin, which is another thing. - result += it.values() - elif isinstance(it, Instance): - # __iter__ returned an instance. - name = '__next__' if is_py3 else 'next' - try: - result += it.execute_subscope_by_name(name) - except KeyError: - debug.warning('Instance has no __next__ function in %s.', it) - else: - # TODO this is not correct, __iter__ can return arbitrary input! - # Is a generator. - result += it.iter_content() - return result - - -def check_array_additions(evaluator, array): - """ Just a mapper function for the internal _check_array_additions """ - if array.type not in ('list', 'set'): - # TODO also check for dict updates - return [] - - is_list = array.type == 'list' - try: - current_module = array.atom.get_parent_until() - except AttributeError: - # If there's no get_parent_until, it's a FakeSequence or another Fake - # type. Those fake types are used inside Jedi's engine. No values may - # be added to those after their creation. - return [] - return _check_array_additions(evaluator, array, current_module, is_list) - - -@memoize_default([], evaluator_is_first_arg=True) -def _check_array_additions(evaluator, compare_array, module, is_list): - """ - Checks if a `Array` has "add" (append, insert, extend) statements: - - >>> a = [""] - >>> a.append(1) - """ - if not settings.dynamic_array_additions or isinstance(module, compiled.CompiledObject): - return [] - - def check_additions(arglist, add_name): - params = list(param.Arguments(evaluator, arglist).unpack()) - result = [] - if add_name in ['insert']: - params = params[1:] - if add_name in ['append', 'add', 'insert']: - for key, nodes in params: - result += unite(evaluator.eval_element(node) for node in nodes) - elif add_name in ['extend', 'update']: - for key, nodes in params: - iterators = unite(evaluator.eval_element(node) for node in nodes) - result += get_iterator_types(iterators) - return result - - from jedi.evaluate import representation as er, param - - def get_execution_parent(element): - """ Used to get an Instance/FunctionExecution parent """ - if isinstance(element, Array): - node = element.atom - else: - # Is an Instance with an - # Arguments([AlreadyEvaluated([ArrayInstance])]) inside - # Yeah... I know... It's complicated ;-) - node = list(element.var_args.argument_node[0])[0].var_args.trailer - if isinstance(node, er.InstanceElement): - return node - return node.get_parent_until(er.FunctionExecution) - - temp_param_add, settings.dynamic_params_for_other_modules = \ - settings.dynamic_params_for_other_modules, False - - search_names = ['append', 'extend', 'insert'] if is_list else ['add', 'update'] - comp_arr_parent = get_execution_parent(compare_array) - - added_types = [] - for add_name in search_names: - try: - possible_names = module.used_names[add_name] - except KeyError: - continue - else: - for name in possible_names: - # Check if the original scope is an execution. 
If it is, one - # can search for the same statement, that is in the module - # dict. Executions are somewhat special in jedi, since they - # literally copy the contents of a function. - if isinstance(comp_arr_parent, er.FunctionExecution): - if comp_arr_parent.start_pos < name.start_pos < comp_arr_parent.end_pos: - name = comp_arr_parent.name_for_position(name.start_pos) - else: - # Don't check definitions that are not defined in the - # same function. This is not "proper" anyway. It also - # improves Jedi's speed for array lookups, since we - # don't have to check the whole source tree anymore. - continue - trailer = name.parent - power = trailer.parent - trailer_pos = power.children.index(trailer) - try: - execution_trailer = power.children[trailer_pos + 1] - except IndexError: - continue - else: - if execution_trailer.type != 'trailer' \ - or execution_trailer.children[0] != '(' \ - or execution_trailer.children[1] == ')': - continue - power = helpers.call_of_name(name, cut_own_trailer=True) - # InstanceElements are special, because they don't get copied, - # but have this wrapper around them. - if isinstance(comp_arr_parent, er.InstanceElement): - power = er.get_instance_el(evaluator, comp_arr_parent.instance, power) - - if evaluator.recursion_detector.push_stmt(power): - # Check for recursion. Possible by using 'extend' in - # combination with function calls. - continue - if compare_array in evaluator.eval_element(power): - # The arrays match. Now add the results - added_types += check_additions(execution_trailer.children[1], add_name) - - evaluator.recursion_detector.pop_stmt() - # reset settings - settings.dynamic_params_for_other_modules = temp_param_add - return added_types - - -def check_array_instances(evaluator, instance): - """Used for set() and list() instances.""" - if not settings.dynamic_array_additions: - return instance.var_args - - ai = ArrayInstance(evaluator, instance) - from jedi.evaluate import param - return param.Arguments(evaluator, [AlreadyEvaluated([ai])]) - - -class ArrayInstance(IterableWrapper): - """ - Used for the usage of set() and list(). - This is definitely a hack, but a good one :-) - It makes it possible to use set/list conversions. - - In contrast to Array, ListComprehension and all other iterable types, this - is something that is only used inside `evaluate/compiled/fake/builtins.py` - and therefore doesn't need `names_dicts`, `py__bool__` and so on, because - we don't use these operations in `builtins.py`. - """ - def __init__(self, evaluator, instance): - self._evaluator = evaluator - self.instance = instance - self.var_args = instance.var_args - - def iter_content(self): - """ - The index is here just ignored, because of all the appends, etc. - lists/sets are too complicated too handle that. - """ - items = [] - for key, nodes in self.var_args.unpack(): - for node in nodes: - for typ in self._evaluator.eval_element(node): - items += get_iterator_types([typ]) - - module = self.var_args.get_parent_until() - is_list = str(self.instance.name) == 'list' - items += _check_array_additions(self._evaluator, self.instance, module, is_list) - return items - - -class Slice(object): - def __init__(self, evaluator, start, stop, step): - self._evaluator = evaluator - # all of them are either a Precedence or None. - self._start = start - self._stop = stop - self._step = step - - @property - def obj(self): - """ - Imitate CompiledObject.obj behavior and return a ``builtin.slice()`` - object. 
- """ - def get(element): - if element is None: - return None - - result = self._evaluator.eval_element(element) - if len(result) != 1: - # We want slices to be clear defined with just one type. - # Otherwise we will return an empty slice object. - raise IndexError - try: - return result[0].obj - except AttributeError: - return None - - try: - return slice(get(self._start), get(self._stop), get(self._step)) - except IndexError: - return slice(None, None, None) - - -def create_indexes_or_slices(evaluator, index): - if tree.is_node(index, 'subscript'): # subscript is a slice operation. - start, stop, step = None, None, None - result = [] - for el in index.children: - if el == ':': - if not result: - result.append(None) - elif tree.is_node(el, 'sliceop'): - if len(el.children) == 2: - result.append(el.children[1]) - else: - result.append(el) - result += [None] * (3 - len(result)) - - return (Slice(evaluator, *result),) - return evaluator.eval_element(index) diff --git a/pythonFiles/release/jedi/evaluate/jedi_typing.py b/pythonFiles/release/jedi/evaluate/jedi_typing.py new file mode 100644 index 000000000000..f48a567327eb --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/jedi_typing.py @@ -0,0 +1,100 @@ +""" +This module is not intended to be used in jedi, rather it will be fed to the +jedi-parser to replace classes in the typing module +""" + +try: + from collections import abc +except ImportError: + # python 2 + import collections as abc + + +def factory(typing_name, indextypes): + class Iterable(abc.Iterable): + def __iter__(self): + while True: + yield indextypes[0]() + + class Iterator(Iterable, abc.Iterator): + def next(self): + """ needed for python 2 """ + return self.__next__() + + def __next__(self): + return indextypes[0]() + + class Sequence(abc.Sequence): + def __getitem__(self, index): + return indextypes[0]() + + class MutableSequence(Sequence, abc.MutableSequence): + pass + + class List(MutableSequence, list): + pass + + class Tuple(Sequence, tuple): + def __getitem__(self, index): + if indextypes[1] == Ellipsis: + # https://www.python.org/dev/peps/pep-0484/#the-typing-module + # Tuple[int, ...] 
means a tuple of ints of indeterminate length + return indextypes[0]() + else: + return indextypes[index]() + + class AbstractSet(Iterable, abc.Set): + pass + + class MutableSet(AbstractSet, abc.MutableSet): + pass + + class KeysView(Iterable, abc.KeysView): + pass + + class ValuesView(abc.ValuesView): + def __iter__(self): + while True: + yield indextypes[1]() + + class ItemsView(abc.ItemsView): + def __iter__(self): + while True: + yield (indextypes[0](), indextypes[1]()) + + class Mapping(Iterable, abc.Mapping): + def __getitem__(self, item): + return indextypes[1]() + + def keys(self): + return KeysView() + + def values(self): + return ValuesView() + + def items(self): + return ItemsView() + + class MutableMapping(Mapping, abc.MutableMapping): + pass + + class Dict(MutableMapping, dict): + pass + + dct = { + "Sequence": Sequence, + "MutableSequence": MutableSequence, + "List": List, + "Iterable": Iterable, + "Iterator": Iterator, + "AbstractSet": AbstractSet, + "MutableSet": MutableSet, + "Mapping": Mapping, + "MutableMapping": MutableMapping, + "Tuple": Tuple, + "KeysView": KeysView, + "ItemsView": ItemsView, + "ValuesView": ValuesView, + "Dict": Dict, + } + return dct[typing_name] diff --git a/pythonFiles/release/jedi/evaluate/lazy_context.py b/pythonFiles/release/jedi/evaluate/lazy_context.py new file mode 100644 index 000000000000..9380212e7ce7 --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/lazy_context.py @@ -0,0 +1,61 @@ +from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS + +class AbstractLazyContext(object): + def __init__(self, data): + self.data = data + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.data) + + def infer(self): + raise NotImplementedError + + +class LazyKnownContext(AbstractLazyContext): + """data is a context.""" + def infer(self): + return ContextSet(self.data) + + +class LazyKnownContexts(AbstractLazyContext): + """data is a ContextSet.""" + def infer(self): + return self.data + + +class LazyUnknownContext(AbstractLazyContext): + def __init__(self): + super(LazyUnknownContext, self).__init__(None) + + def infer(self): + return NO_CONTEXTS + + +class LazyTreeContext(AbstractLazyContext): + def __init__(self, context, node): + super(LazyTreeContext, self).__init__(node) + self._context = context + # We need to save the predefined names. It's an unfortunate side effect + # that needs to be tracked; otherwise results will be wrong.
+ self._predefined_names = dict(context.predefined_names) + + def infer(self): + old, self._context.predefined_names = \ + self._context.predefined_names, self._predefined_names + try: + return self._context.eval_node(self.data) + finally: + self._context.predefined_names = old + + +def get_merged_lazy_context(lazy_contexts): + if len(lazy_contexts) > 1: + return MergedLazyContexts(lazy_contexts) + else: + return lazy_contexts[0] + + +class MergedLazyContexts(AbstractLazyContext): + """data is a list of lazy contexts.""" + def infer(self): + return ContextSet.from_sets(l.infer() for l in self.data) diff --git a/pythonFiles/release/jedi/evaluate/param.py b/pythonFiles/release/jedi/evaluate/param.py index 8524bf958582..a46394ce8a81 100755 --- a/pythonFiles/release/jedi/evaluate/param.py +++ b/pythonFiles/release/jedi/evaluate/param.py @@ -1,303 +1,128 @@ from collections import defaultdict -from itertools import chain -from jedi._compatibility import unicode, zip_longest -from jedi import debug -from jedi import common -from jedi.parser import tree -from jedi.evaluate import iterable +from jedi.evaluate.utils import PushBackIterator from jedi.evaluate import analysis -from jedi.evaluate import precedence -from jedi.evaluate.helpers import FakeName -from jedi.cache import underscore_memoization +from jedi.evaluate.lazy_context import LazyKnownContext, \ + LazyTreeContext, LazyUnknownContext +from jedi.evaluate import docstrings +from jedi.evaluate import pep0484 +from jedi.evaluate.context import iterable -class Arguments(tree.Base): - def __init__(self, evaluator, argument_node, trailer=None): - """ - The argument_node is either a parser node or a list of evaluated - objects. Those evaluated objects may be lists of evaluated objects - themselves (one list for the first argument, one for the second, etc). +def _add_argument_issue(parent_context, error_name, lazy_context, message): + if isinstance(lazy_context, LazyTreeContext): + node = lazy_context.data + if node.parent.type == 'argument': + node = node.parent + analysis.add(parent_context, error_name, node, message) - :param argument_node: May be an argument_node or a list of nodes. - """ - self.argument_node = argument_node - self._evaluator = evaluator - self.trailer = trailer # Can be None, e.g. in a class definition. 
- def _split(self): - if isinstance(self.argument_node, (tuple, list)): - for el in self.argument_node: - yield 0, el - else: - if not tree.is_node(self.argument_node, 'arglist'): - yield 0, self.argument_node - return - - iterator = iter(self.argument_node.children) - for child in iterator: - if child == ',': - continue - elif child in ('*', '**'): - yield len(child.value), next(iterator) - else: - yield 0, child - - def get_parent_until(self, *args, **kwargs): - if self.trailer is None: - try: - element = self.argument_node[0] - from jedi.evaluate.iterable import AlreadyEvaluated - if isinstance(element, AlreadyEvaluated): - element = self._evaluator.eval_element(element)[0] - except IndexError: - return None - else: - return element.get_parent_until(*args, **kwargs) - else: - return self.trailer.get_parent_until(*args, **kwargs) - - def as_tuple(self): - for stars, argument in self._split(): - if tree.is_node(argument, 'argument'): - argument, default = argument.children[::2] - else: - default = None - yield argument, default, stars - - def unpack(self, func=None): - named_args = [] - for stars, el in self._split(): - if stars == 1: - arrays = self._evaluator.eval_element(el) - iterators = [_iterate_star_args(self._evaluator, a, el, func) - for a in arrays] - iterators = list(iterators) - for values in list(zip_longest(*iterators)): - yield None, [v for v in values if v is not None] - elif stars == 2: - arrays = self._evaluator.eval_element(el) - dicts = [_star_star_dict(self._evaluator, a, el, func) - for a in arrays] - for dct in dicts: - for key, values in dct.items(): - yield key, values - else: - if tree.is_node(el, 'argument'): - c = el.children - if len(c) == 3: # Keyword argument. - named_args.append((c[0].value, (c[2],))) - else: # Generator comprehension. - # Include the brackets with the parent. - comp = iterable.GeneratorComprehension( - self._evaluator, self.argument_node.parent) - yield None, (iterable.AlreadyEvaluated([comp]),) - elif isinstance(el, (list, tuple)): - yield None, el - else: - yield None, (el,) - - # Reordering var_args is necessary, because star args sometimes appear - # after named argument, but in the actual order it's prepended. - for key_arg in named_args: - yield key_arg - - def _reorder_var_args(var_args): - named_index = None - new_args = [] - for i, stmt in enumerate(var_args): - if isinstance(stmt, tree.ExprStmt): - if named_index is None and stmt.assignment_details: - named_index = i - - if named_index is not None: - expression_list = stmt.expression_list() - if expression_list and expression_list[0] == '*': - new_args.insert(named_index, stmt) - named_index += 1 - continue - - new_args.append(stmt) - return new_args - - def eval_argument_clinic(self, arguments): - """Uses a list with argument clinic information (see PEP 436).""" - iterator = self.unpack() - for i, (name, optional, allow_kwargs) in enumerate(arguments): - key, va_values = next(iterator, (None, [])) - if key is not None: - raise NotImplementedError - if not va_values and not optional: - debug.warning('TypeError: %s expected at least %s arguments, got %s', - name, len(arguments), i) - raise ValueError - values = list(chain.from_iterable(self._evaluator.eval_element(el) - for el in va_values)) - if not values and not optional: - # For the stdlib we always want values. If we don't get them, - # that's ok, maybe something is too hard to resolve, however, - # we will not proceed with the evaluation of that function. 
- debug.warning('argument_clinic "%s" not resolvable.', name) - raise ValueError - yield values - - def scope(self): - # Returns the scope in which the arguments are used. - return (self.trailer or self.argument_node).get_parent_until(tree.IsScope) - - def eval_args(self): - # TODO this method doesn't work with named args and a lot of other - # things. Use unpack. - return [self._evaluator.eval_element(el) for stars, el in self._split()] - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self.argument_node) - - def get_calling_var_args(self): - if tree.is_node(self.argument_node, 'arglist', 'argument') \ - or self.argument_node == () and self.trailer is not None: - return _get_calling_var_args(self._evaluator, self) - else: - return None - - -class ExecutedParam(tree.Param): +class ExecutedParam(object): """Fake a param and give it values.""" - def __init__(self, original_param, var_args, values): - self._original_param = original_param - self.var_args = var_args - self._values = values + def __init__(self, execution_context, param_node, lazy_context): + self._execution_context = execution_context + self._param_node = param_node + self._lazy_context = lazy_context + self.string_name = param_node.name.value - def eval(self, evaluator): - types = [] - for v in self._values: - types += evaluator.eval_element(v) - return types + def infer(self): + pep0484_hints = pep0484.infer_param(self._execution_context, self._param_node) + doc_params = docstrings.infer_param(self._execution_context, self._param_node) + if pep0484_hints or doc_params: + return pep0484_hints | doc_params - @property - def position_nr(self): - # Need to use the original logic here, because it uses the parent. - return self._original_param.position_nr + return self._lazy_context.infer() @property - @underscore_memoization - def name(self): - return FakeName(str(self._original_param.name), self, self.start_pos) - - def __getattr__(self, name): - return getattr(self._original_param, name) - - -def _get_calling_var_args(evaluator, var_args): - old_var_args = None - while var_args != old_var_args: - old_var_args = var_args - for name, default, stars in reversed(list(var_args.as_tuple())): - if not stars or not isinstance(name, tree.Name): - continue + def var_args(self): + return self._execution_context.var_args - names = evaluator.goto(name) - if len(names) != 1: - break - param = names[0].get_definition() - if not isinstance(param, ExecutedParam): - if isinstance(param, tree.Param): - # There is no calling var_args in this case - there's just - # a param without any input. - return None - break - # We never want var_args to be a tuple. This should be enough for - # now, we can change it later, if we need to. - if isinstance(param.var_args, Arguments): - var_args = param.var_args - return var_args.argument_node or var_args.trailer + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.string_name) -def get_params(evaluator, func, var_args): - param_names = [] +def get_params(execution_context, var_args): + result_params = [] param_dict = {} - for param in func.params: - param_dict[str(param.name)] = param - unpacked_va = list(var_args.unpack(func)) - from jedi.evaluate.representation import InstanceElement - if isinstance(func, InstanceElement): - # Include self at this place. 
- unpacked_va.insert(0, (None, [iterable.AlreadyEvaluated([func.instance])])) - var_arg_iterator = common.PushBackIterator(iter(unpacked_va)) + funcdef = execution_context.tree_node + parent_context = execution_context.parent_context + + for param in funcdef.get_params(): + param_dict[param.name.value] = param + unpacked_va = list(var_args.unpack(funcdef)) + var_arg_iterator = PushBackIterator(iter(unpacked_va)) non_matching_keys = defaultdict(lambda: []) keys_used = {} keys_only = False had_multiple_value_error = False - for param in func.params: + for param in funcdef.get_params(): # The value and key can both be null. There, the defaults apply. # args / kwargs will just be empty arrays / dicts, respectively. # Wrong value count is just ignored. If you try to test cases that are # not allowed in Python, Jedi will maybe not show any completions. - default = [] if param.default is None else [param.default] - key, va_values = next(var_arg_iterator, (None, default)) + key, argument = next(var_arg_iterator, (None, None)) while key is not None: keys_only = True - k = unicode(key) try: - key_param = param_dict[unicode(key)] + key_param = param_dict[key] except KeyError: - non_matching_keys[key] += va_values + non_matching_keys[key] = argument else: - param_names.append(ExecutedParam(key_param, var_args, va_values).name) + if key in keys_used: + had_multiple_value_error = True + m = ("TypeError: %s() got multiple values for keyword argument '%s'." + % (funcdef.name, key)) + for node in var_args.get_calling_nodes(): + analysis.add(parent_context, 'type-error-multiple-values', + node, message=m) + else: + keys_used[key] = ExecutedParam(execution_context, key_param, argument) + key, argument = next(var_arg_iterator, (None, None)) - if k in keys_used: - had_multiple_value_error = True - m = ("TypeError: %s() got multiple values for keyword argument '%s'." - % (func.name, k)) - calling_va = _get_calling_var_args(evaluator, var_args) - if calling_va is not None: - analysis.add(evaluator, 'type-error-multiple-values', - calling_va, message=m) - else: - try: - keys_used[k] = param_names[-1] - except IndexError: - # TODO this is wrong stupid and whatever. - pass - key, va_values = next(var_arg_iterator, (None, ())) + try: + result_params.append(keys_used[param.name.value]) + continue + except KeyError: + pass - values = [] - if param.stars == 1: + if param.star_count == 1: # *args param - lst_values = [iterable.MergedNodes(va_values)] if va_values else [] - for key, va_values in var_arg_iterator: - # Iterate until a key argument is found. - if key: - var_arg_iterator.push_back((key, va_values)) - break - if va_values: - lst_values.append(iterable.MergedNodes(va_values)) - seq = iterable.FakeSequence(evaluator, lst_values, 'tuple') - values = [iterable.AlreadyEvaluated([seq])] - elif param.stars == 2: + lazy_context_list = [] + if argument is not None: + lazy_context_list.append(argument) + for key, argument in var_arg_iterator: + # Iterate until a key argument is found. 
+ if key: + var_arg_iterator.push_back((key, argument)) + break + lazy_context_list.append(argument) + seq = iterable.FakeSequence(execution_context.evaluator, 'tuple', lazy_context_list) + result_arg = LazyKnownContext(seq) + elif param.star_count == 2: # **kwargs param - dct = iterable.FakeDict(evaluator, dict(non_matching_keys)) - values = [iterable.AlreadyEvaluated([dct])] + dct = iterable.FakeDict(execution_context.evaluator, dict(non_matching_keys)) + result_arg = LazyKnownContext(dct) non_matching_keys = {} else: # normal param - if va_values: - values = va_values - else: + if argument is None: # No value: Return an empty container - values = [] - if not keys_only: - calling_va = var_args.get_calling_var_args() - if calling_va is not None: - m = _error_argument_count(func, len(unpacked_va)) - analysis.add(evaluator, 'type-error-too-few-arguments', - calling_va, message=m) + if param.default is None: + result_arg = LazyUnknownContext() + if not keys_only: + for node in var_args.get_calling_nodes(): + m = _error_argument_count(funcdef, len(unpacked_va)) + analysis.add(parent_context, 'type-error-too-few-arguments', + node, message=m) + else: + result_arg = LazyTreeContext(parent_context, param.default) + else: + result_arg = argument - # Now add to result if it's not one of the previously covered cases. - if (not keys_only or param.stars == 2): - param_names.append(ExecutedParam(param, var_args, values).name) - keys_used[unicode(param.name)] = param_names[-1] + result_params.append(ExecutedParam(execution_context, param, result_arg)) + if not isinstance(result_arg, LazyUnknownContext): + keys_used[param.name.value] = result_params[-1] if keys_only: # All arguments should be handed over to the next function. It's not @@ -305,99 +130,66 @@ def get_params(evaluator, func, var_args): # there's nothing to find for certain names. for k in set(param_dict) - set(keys_used): param = param_dict[k] - values = [] if param.default is None else [param.default] - param_names.append(ExecutedParam(param, var_args, values).name) - if not (non_matching_keys or had_multiple_value_error - or param.stars or param.default): + if not (non_matching_keys or had_multiple_value_error or + param.star_count or param.default): # add a warning only if there's not another one. - calling_va = _get_calling_var_args(evaluator, var_args) - if calling_va is not None: - m = _error_argument_count(func, len(unpacked_va)) - analysis.add(evaluator, 'type-error-too-few-arguments', - calling_va, message=m) + for node in var_args.get_calling_nodes(): + m = _error_argument_count(funcdef, len(unpacked_va)) + analysis.add(parent_context, 'type-error-too-few-arguments', + node, message=m) - for key, va_values in non_matching_keys.items(): + for key, lazy_context in non_matching_keys.items(): m = "TypeError: %s() got an unexpected keyword argument '%s'." \ - % (func.name, key) - for value in va_values: - analysis.add(evaluator, 'type-error-keyword-argument', value.parent, message=m) - - remaining_params = list(var_arg_iterator) - if remaining_params: - m = _error_argument_count(func, len(unpacked_va)) + % (funcdef.name, key) + _add_argument_issue( + parent_context, + 'type-error-keyword-argument', + lazy_context, + message=m + ) + + remaining_arguments = list(var_arg_iterator) + if remaining_arguments: + m = _error_argument_count(funcdef, len(unpacked_va)) # Just report an error for the first param that is not needed (like # cPython). 
- first_key, first_values = remaining_params[0] - for v in first_values: - if first_key is not None: - # Is a keyword argument, return the whole thing instead of just - # the value node. - v = v.parent - try: - non_kw_param = keys_used[first_key] - except KeyError: - pass - else: - origin_args = non_kw_param.parent.var_args.argument_node - # TODO calculate the var_args tree and check if it's in - # the tree (if not continue). - # print('\t\tnonkw', non_kw_param.parent.var_args.argument_node, ) - if origin_args not in [f.parent.parent for f in first_values]: - continue - analysis.add(evaluator, 'type-error-too-many-arguments', - v, message=m) - return param_names + first_key, lazy_context = remaining_arguments[0] + if var_args.get_calling_nodes(): + # There might not be a valid calling node so check for that first. + _add_argument_issue(parent_context, 'type-error-too-many-arguments', lazy_context, message=m) + return result_params -def _iterate_star_args(evaluator, array, input_node, func=None): - from jedi.evaluate.representation import Instance - if isinstance(array, iterable.Array): - for field_stmt in array: # yield from plz! - yield field_stmt - elif isinstance(array, iterable.Generator): - for field_stmt in array.iter_content(): - yield iterable.AlreadyEvaluated([field_stmt]) - elif isinstance(array, Instance) and array.name.get_code() == 'tuple': - debug.warning('Ignored a tuple *args input %s' % array) - else: - if func is not None: - m = "TypeError: %s() argument after * must be a sequence, not %s" \ - % (func.name.value, array) - analysis.add(evaluator, 'type-error-star', input_node, message=m) - - -def _star_star_dict(evaluator, array, input_node, func): - dct = defaultdict(lambda: []) - from jedi.evaluate.representation import Instance - if isinstance(array, Instance) and array.name.get_code() == 'dict': - # For now ignore this case. In the future add proper iterators and just - # make one call without crazy isinstance checks. - return {} - - if isinstance(array, iterable.FakeDict): - return array._dct - elif isinstance(array, iterable.Array) and array.type == 'dict': - # TODO bad call to non-public API - for key_node, values in array._items(): - for key in evaluator.eval_element(key_node): - if precedence.is_string(key): - dct[key.obj] += values - - else: - if func is not None: - m = "TypeError: %s argument after ** must be a mapping, not %s" \ - % (func.name.value, array) - analysis.add(evaluator, 'type-error-star-star', input_node, message=m) - return dict(dct) - - -def _error_argument_count(func, actual_count): - default_arguments = sum(1 for p in func.params if p.default or p.stars) +def _error_argument_count(funcdef, actual_count): + params = funcdef.get_params() + default_arguments = sum(1 for p in params if p.default or p.star_count) if default_arguments == 0: before = 'exactly ' else: - before = 'from %s to ' % (len(func.params) - default_arguments) + before = 'from %s to ' % (len(params) - default_arguments) return ('TypeError: %s() takes %s%s arguments (%s given).' 
- % (func.name, before, len(func.params), actual_count)) + % (funcdef.name, before, len(params), actual_count)) + + +def _create_default_param(execution_context, param): + if param.star_count == 1: + result_arg = LazyKnownContext( + iterable.FakeSequence(execution_context.evaluator, 'tuple', []) + ) + elif param.star_count == 2: + result_arg = LazyKnownContext( + iterable.FakeDict(execution_context.evaluator, {}) + ) + elif param.default is None: + result_arg = LazyUnknownContext() + else: + result_arg = LazyTreeContext(execution_context.parent_context, param.default) + return ExecutedParam(execution_context, param, result_arg) + + +def create_default_params(execution_context, funcdef): + return [_create_default_param(execution_context, p) + for p in funcdef.get_params()] + diff --git a/pythonFiles/release/jedi/evaluate/parser_cache.py b/pythonFiles/release/jedi/evaluate/parser_cache.py new file mode 100644 index 000000000000..84fe52d977f8 --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/parser_cache.py @@ -0,0 +1,6 @@ +from jedi.evaluate.cache import evaluator_function_cache + + +@evaluator_function_cache() +def get_yield_exprs(evaluator, funcdef): + return list(funcdef.iter_yield_exprs()) diff --git a/pythonFiles/release/jedi/evaluate/pep0484.py b/pythonFiles/release/jedi/evaluate/pep0484.py new file mode 100644 index 000000000000..820f112c54e0 --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/pep0484.py @@ -0,0 +1,222 @@ +""" +PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints +through function annotations. There is a strong suggestion in this document +that only the type of type hinting defined in PEP0484 should be allowed +as annotations in future python versions. + +The (initial / probably incomplete) implementation todo list for pep-0484: +v Function parameter annotations with builtin/custom type classes +v Function returntype annotations with builtin/custom type classes +v Function parameter annotations with strings (forward reference) +v Function return type annotations with strings (forward reference) +v Local variable type hints +v Assigned types: `Url = str\ndef get(url:Url) -> str:` +v Type hints in `with` statements +x Stub files support +x support `@no_type_check` and `@no_type_check_decorator` +x support for typing.cast() operator +x support for type hint comments for functions, `# type: (int, str) -> int`. 
+ See comment from Guido https://github.com/davidhalter/jedi/issues/662 +""" + +import os +import re + +from parso import ParserSyntaxError +from parso.python import tree + +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate import compiled +from jedi.evaluate.base_context import NO_CONTEXTS, ContextSet +from jedi.evaluate.lazy_context import LazyTreeContext +from jedi.evaluate.context import ModuleContext +from jedi import debug +from jedi import _compatibility +from jedi import parser_utils + + +def _evaluate_for_annotation(context, annotation, index=None): + """ + Evaluates a string-node, looking for an annotation + If index is not None, the annotation is expected to be a tuple + and we're interested in that index + """ + if annotation is not None: + context_set = context.eval_node(_fix_forward_reference(context, annotation)) + if index is not None: + context_set = context_set.filter( + lambda context: context.array_type == 'tuple' \ + and len(list(context.py__iter__())) >= index + ).py__getitem__(index) + return context_set.execute_evaluated() + else: + return NO_CONTEXTS + + +def _fix_forward_reference(context, node): + evaled_nodes = context.eval_node(node) + if len(evaled_nodes) != 1: + debug.warning("Eval'ed typing index %s should lead to 1 object, " + " not %s" % (node, evaled_nodes)) + return node + evaled_node = list(evaled_nodes)[0] + if isinstance(evaled_node, compiled.CompiledObject) and \ + isinstance(evaled_node.obj, str): + try: + new_node = context.evaluator.grammar.parse( + _compatibility.unicode(evaled_node.obj), + start_symbol='eval_input', + error_recovery=False + ) + except ParserSyntaxError: + debug.warning('Annotation not parsed: %s' % evaled_node.obj) + return node + else: + module = node.get_root_node() + parser_utils.move(new_node, module.end_pos[0]) + new_node.parent = context.tree_node + return new_node + else: + return node + + +@evaluator_method_cache() +def infer_param(execution_context, param): + annotation = param.annotation + module_context = execution_context.get_root_context() + return _evaluate_for_annotation(module_context, annotation) + + +def py__annotations__(funcdef): + return_annotation = funcdef.annotation + if return_annotation: + dct = {'return': return_annotation} + else: + dct = {} + for function_param in funcdef.get_params(): + param_annotation = function_param.annotation + if param_annotation is not None: + dct[function_param.name.value] = param_annotation + return dct + + +@evaluator_method_cache() +def infer_return_types(function_context): + annotation = py__annotations__(function_context.tree_node).get("return", None) + module_context = function_context.get_root_context() + return _evaluate_for_annotation(module_context, annotation) + + +_typing_module = None + + +def _get_typing_replacement_module(grammar): + """ + The idea is to return our jedi replacement for the PEP-0484 typing module + as discussed at https://github.com/davidhalter/jedi/issues/663 + """ + global _typing_module + if _typing_module is None: + typing_path = \ + os.path.abspath(os.path.join(__file__, "../jedi_typing.py")) + with open(typing_path) as f: + code = _compatibility.unicode(f.read()) + _typing_module = grammar.parse(code) + return _typing_module + + +def py__getitem__(context, typ, node): + if not typ.get_root_context().name.string_name == "typing": + return None + # we assume that any class using [] in a module called + # "typing" with a name for which we have a replacement + # should be replaced by that class. 
This is not 100% + # airtight but I don't have a better idea to check that it's + # actually the PEP-0484 typing module and not some other + if node.type == "subscriptlist": + nodes = node.children[::2] # skip the commas + else: + nodes = [node] + del node + + nodes = [_fix_forward_reference(context, node) for node in nodes] + type_name = typ.name.string_name + + # hacked in Union and Optional, since it's hard to do nicely in parsed code + if type_name in ("Union", '_Union'): + # In Python 3.6 it's still called typing.Union but it's an instance + # called _Union. + return ContextSet.from_sets(context.eval_node(node) for node in nodes) + if type_name in ("Optional", '_Optional'): + # Here we have the same issue like in Union. Therefore we also need to + # check for the instance typing._Optional (Python 3.6). + return context.eval_node(nodes[0]) + + typing = ModuleContext( + context.evaluator, + module_node=_get_typing_replacement_module(context.evaluator.latest_grammar), + path=None + ) + factories = typing.py__getattribute__("factory") + assert len(factories) == 1 + factory = list(factories)[0] + assert factory + function_body_nodes = factory.tree_node.children[4].children + valid_classnames = set(child.name.value + for child in function_body_nodes + if isinstance(child, tree.Class)) + if type_name not in valid_classnames: + return None + compiled_classname = compiled.create(context.evaluator, type_name) + + from jedi.evaluate.context.iterable import FakeSequence + args = FakeSequence( + context.evaluator, + "tuple", + [LazyTreeContext(context, n) for n in nodes] + ) + + result = factory.execute_evaluated(compiled_classname, args) + return result + + +def find_type_from_comment_hint_for(context, node, name): + return _find_type_from_comment_hint(context, node, node.children[1], name) + + +def find_type_from_comment_hint_with(context, node, name): + assert len(node.children[1].children) == 3, \ + "Can only be here when children[1] is 'foo() as f'" + varlist = node.children[1].children[2] + return _find_type_from_comment_hint(context, node, varlist, name) + + +def find_type_from_comment_hint_assign(context, node, name): + return _find_type_from_comment_hint(context, node, node.children[0], name) + + +def _find_type_from_comment_hint(context, node, varlist, name): + index = None + if varlist.type in ("testlist_star_expr", "exprlist", "testlist"): + # something like "a, b = 1, 2" + index = 0 + for child in varlist.children: + if child == name: + break + if child.type == "operator": + continue + index += 1 + else: + return [] + + comment = parser_utils.get_following_comment_same_line(node) + if comment is None: + return [] + match = re.match(r"^#\s*type:\s*([^#]*)", comment) + if not match: + return [] + annotation = tree.String( + repr(str(match.group(1).strip())), + node.start_pos) + annotation.parent = node.parent + return _evaluate_for_annotation(context, annotation, index) diff --git a/pythonFiles/release/jedi/evaluate/precedence.py b/pythonFiles/release/jedi/evaluate/precedence.py deleted file mode 100755 index 7a2ee6d2763b..000000000000 --- a/pythonFiles/release/jedi/evaluate/precedence.py +++ /dev/null @@ -1,174 +0,0 @@ -""" -Handles operator precedence. -""" -import operator - -from jedi._compatibility import unicode -from jedi.parser import tree -from jedi import debug -from jedi.evaluate.compiled import (CompiledObject, create, builtin, - keyword_from_value, true_obj, false_obj) -from jedi.evaluate import analysis - -# Maps Python syntax to the operator module. 
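# A self-contained sketch (not part of the patch) of the pattern the removed
# precedence.py relied on here: comparison syntax is mapped onto functions
# from the stdlib operator module and applied to concrete objects.
import operator

comparisons = {'==': operator.eq, '!=': operator.ne, '<': operator.lt}
assert comparisons['<'](1, 2) is True
assert comparisons['=='](1, 1.0) is True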
-COMPARISON_OPERATORS = { - '==': operator.eq, - '!=': operator.ne, - 'is': operator.is_, - 'is not': operator.is_not, - '<': operator.lt, - '<=': operator.le, - '>': operator.gt, - '>=': operator.ge, -} - - -def _literals_to_types(evaluator, result): - # Changes literals ('a', 1, 1.0, etc) to its type instances (str(), - # int(), float(), etc). - for i, r in enumerate(result): - if is_literal(r): - # Literals are only valid as long as the operations are - # correct. Otherwise add a value-free instance. - cls = builtin.get_by_name(r.name.get_code()) - result[i] = evaluator.execute(cls)[0] - return list(set(result)) - - -def calculate_children(evaluator, children): - """ - Calculate a list of children with operators. - """ - iterator = iter(children) - types = evaluator.eval_element(next(iterator)) - for operator in iterator: - right = next(iterator) - if tree.is_node(operator, 'comp_op'): # not in / is not - operator = ' '.join(str(c.value) for c in operator.children) - - # handle lazy evaluation of and/or here. - if operator in ('and', 'or'): - left_bools = set([left.py__bool__() for left in types]) - if left_bools == set([True]): - if operator == 'and': - types = evaluator.eval_element(right) - elif left_bools == set([False]): - if operator != 'and': - types = evaluator.eval_element(right) - # Otherwise continue, because of uncertainty. - else: - types = calculate(evaluator, types, operator, - evaluator.eval_element(right)) - debug.dbg('calculate_children types %s', types) - return types - - -def calculate(evaluator, left_result, operator, right_result): - result = [] - if not left_result or not right_result: - # illegal slices e.g. cause left/right_result to be None - result = (left_result or []) + (right_result or []) - result = _literals_to_types(evaluator, result) - else: - # I don't think there's a reasonable chance that a string - # operation is still correct, once we pass something like six - # objects. - if len(left_result) * len(right_result) > 6: - result = _literals_to_types(evaluator, left_result + right_result) - else: - for left in left_result: - for right in right_result: - result += _element_calculate(evaluator, left, operator, right) - return result - - -def factor_calculate(evaluator, types, operator): - """ - Calculates `+`, `-`, `~` and `not` prefixes. - """ - for typ in types: - if operator == '-': - if _is_number(typ): - yield create(evaluator, -typ.obj) - elif operator == 'not': - value = typ.py__bool__() - if value is None: # Uncertainty. 
- return - yield keyword_from_value(not value) - else: - yield typ - - -def _is_number(obj): - return isinstance(obj, CompiledObject) \ - and isinstance(obj.obj, (int, float)) - - -def is_string(obj): - return isinstance(obj, CompiledObject) \ - and isinstance(obj.obj, (str, unicode)) - - -def is_literal(obj): - return _is_number(obj) or is_string(obj) - - -def _is_tuple(obj): - from jedi.evaluate import iterable - return isinstance(obj, iterable.Array) and obj.type == 'tuple' - - -def _is_list(obj): - from jedi.evaluate import iterable - return isinstance(obj, iterable.Array) and obj.type == 'list' - - -def _element_calculate(evaluator, left, operator, right): - from jedi.evaluate import iterable, representation as er - l_is_num = _is_number(left) - r_is_num = _is_number(right) - if operator == '*': - # for iterables, ignore * operations - if isinstance(left, iterable.Array) or is_string(left): - return [left] - elif isinstance(right, iterable.Array) or is_string(right): - return [right] - elif operator == '+': - if l_is_num and r_is_num or is_string(left) and is_string(right): - return [create(evaluator, left.obj + right.obj)] - elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right): - return [iterable.MergedArray(evaluator, (left, right))] - elif operator == '-': - if l_is_num and r_is_num: - return [create(evaluator, left.obj - right.obj)] - elif operator == '%': - # With strings and numbers the left type typically remains. Except for - # `int() % float()`. - return [left] - elif operator in COMPARISON_OPERATORS: - operation = COMPARISON_OPERATORS[operator] - if isinstance(left, CompiledObject) and isinstance(right, CompiledObject): - # Possible, because the return is not an option. Just compare. - left = left.obj - right = right.obj - - try: - return [keyword_from_value(operation(left, right))] - except TypeError: - # Could be True or False. - return [true_obj, false_obj] - elif operator == 'in': - return [] - - def check(obj): - """Checks if a Jedi object is either a float or an int.""" - return isinstance(obj, er.Instance) and obj.name.get_code() in ('int', 'float') - - # Static analysis, one is a number, the other one is not. 
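# For reference (not part of the patch): the concrete CPython failure that
# the 'type-error-operation' report just below corresponds to -- a number
# mixed with a non-number under + or -.
try:
    1 + 'a'
except TypeError as e:
    print(e)  # unsupported operand type(s) for +: 'int' and 'str'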
- if operator in ('+', '-') and l_is_num != r_is_num \ - and not (check(left) or check(right)): - message = "TypeError: unsupported operand type(s) for +: %s and %s" - analysis.add(evaluator, 'type-error-operation', operator, - message % (left, right)) - - return [left, right] diff --git a/pythonFiles/release/jedi/evaluate/project.py b/pythonFiles/release/jedi/evaluate/project.py new file mode 100644 index 000000000000..b90f0f0c3be8 --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/project.py @@ -0,0 +1,40 @@ +import os +import sys + +from jedi.evaluate.sys_path import get_venv_path, detect_additional_paths +from jedi.cache import underscore_memoization + + +class Project(object): + def __init__(self, sys_path=None): + if sys_path is not None: + self._sys_path = sys_path + + venv = os.getenv('VIRTUAL_ENV') + if venv: + sys_path = get_venv_path(venv) + + if sys_path is None: + sys_path = sys.path + + base_sys_path = list(sys_path) + try: + base_sys_path.remove('') + except ValueError: + pass + + self._base_sys_path = base_sys_path + + def add_script_path(self, script_path): + self._script_path = script_path + + def add_evaluator(self, evaluator): + self._evaluator = evaluator + + @property + @underscore_memoization + def sys_path(self): + if self._script_path is None: + return self._base_sys_path + + return self._base_sys_path + detect_additional_paths(self._evaluator, self._script_path) diff --git a/pythonFiles/release/jedi/evaluate/recursion.py b/pythonFiles/release/jedi/evaluate/recursion.py index a4f5fbc33fc3..e2f34a4a9a06 100755 --- a/pythonFiles/release/jedi/evaluate/recursion.py +++ b/pythonFiles/release/jedi/evaluate/recursion.py @@ -6,152 +6,130 @@ Next to :mod:`jedi.evaluate.cache` this module also makes |jedi| not thread-safe. Why? ``execution_recursion_decorator`` uses class variables to count the function calls. + +.. _settings-recursion: + +Settings +~~~~~~~~~~ + +Recursion settings are important if you don't want extremly +recursive python code to go absolutely crazy. + +The default values are based on experiments while completing the |jedi| library +itself (inception!). But I don't think there's any other Python library that +uses recursion in a similarly extreme way. Completion should also be fast and +therefore the quality might not always be maximal. + +.. autodata:: recursion_limit +.. autodata:: total_function_execution_limit +.. autodata:: per_function_execution_limit +.. autodata:: per_function_recursion_limit """ -from jedi import debug -from jedi import settings -from jedi.evaluate import compiled -from jedi.evaluate import iterable +from contextlib import contextmanager + +from jedi import debug +from jedi.evaluate.base_context import NO_CONTEXTS -def recursion_decorator(func): - def run(evaluator, stmt, *args, **kwargs): - rec_detect = evaluator.recursion_detector - # print stmt, len(self.node_statements()) - if rec_detect.push_stmt(stmt): - return [] - else: - result = func(evaluator, stmt, *args, **kwargs) - rec_detect.pop_stmt() - return result - return run +recursion_limit = 15 +""" +Like ``sys.getrecursionlimit()``, just for |jedi|. +""" +total_function_execution_limit = 200 +""" +This is a hard limit of how many non-builtin functions can be executed. +""" +per_function_execution_limit = 6 +""" +The maximal amount of times a specific function may be executed. +""" +per_function_recursion_limit = 2 +""" +A function may not be executed more than this number of times recursively. 
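
As a rough illustration (not upstream documentation): the kind of code these
limits exist for is pathologically self-referential, e.g.::

    def flatten(x):
        return [flatten(i) for i in x]

Inferring the return type of ``flatten`` recurses without bound, so the
limits above simply cut the search off.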
+""" class RecursionDetector(object): + def __init__(self): + self.pushed_nodes = [] + + +@contextmanager +def execution_allowed(evaluator, node): """ A decorator to detect recursions in statements. In a recursion a statement at the same place, in the same module may not be executed two times. """ - def __init__(self): - self.top = None - self.current = None - - def push_stmt(self, stmt): - self.current = _RecursionNode(stmt, self.current) - check = self._check_recursion() - if check: - debug.warning('catched stmt recursion: %s against %s @%s', stmt, - check.stmt, stmt.start_pos) - self.pop_stmt() - return True - return False - - def pop_stmt(self): - if self.current is not None: - # I don't know how current can be None, but sometimes it happens - # with Python3. - self.current = self.current.parent - - def _check_recursion(self): - test = self.current - while True: - test = test.parent - if self.current == test: - return test - if not test: - return False - - def node_statements(self): - result = [] - n = self.current - while n: - result.insert(0, n.stmt) - n = n.parent - return result - - -class _RecursionNode(object): - """ A node of the RecursionDecorator. """ - def __init__(self, stmt, parent): - self.script = stmt.get_parent_until() - self.position = stmt.start_pos - self.parent = parent - self.stmt = stmt - - # Don't check param instances, they are not causing recursions - # The same's true for the builtins, because the builtins are really - # simple. - self.is_ignored = self.script == compiled.builtin - - def __eq__(self, other): - if not other: - return None - - return self.script == other.script \ - and self.position == other.position \ - and not self.is_ignored and not other.is_ignored - - -def execution_recursion_decorator(func): - def run(execution, **kwargs): - detector = execution._evaluator.execution_recursion_detector - if detector.push_execution(execution): - result = [] - else: - result = func(execution, **kwargs) - detector.pop_execution() - return result - - return run + pushed_nodes = evaluator.recursion_detector.pushed_nodes + + if node in pushed_nodes: + debug.warning('catched stmt recursion: %s @%s', node, + node.start_pos) + yield False + else: + pushed_nodes.append(node) + yield True + pushed_nodes.pop() + + +def execution_recursion_decorator(default=NO_CONTEXTS): + def decorator(func): + def wrapper(execution, **kwargs): + detector = execution.evaluator.execution_recursion_detector + allowed = detector.push_execution(execution) + try: + if allowed: + result = default + else: + result = func(execution, **kwargs) + finally: + detector.pop_execution() + return result + return wrapper + return decorator class ExecutionRecursionDetector(object): """ Catches recursions of executions. - It is designed like a Singelton. Only one instance should exist. 
""" - def __init__(self): - self.recursion_level = 0 - self.parent_execution_funcs = [] - self.execution_funcs = set() - self.execution_count = 0 - - def __call__(self, execution): - debug.dbg('Execution recursions: %s', execution, self.recursion_level, - self.execution_count, len(self.execution_funcs)) - if self.check_recursion(execution): - result = [] - else: - result = self.func(execution) - self.pop_execution() - return result - - def pop_execution(cls): - cls.parent_execution_funcs.pop() - cls.recursion_level -= 1 - - def push_execution(cls, execution): - in_par_execution_funcs = execution.base in cls.parent_execution_funcs - in_execution_funcs = execution.base in cls.execution_funcs - cls.recursion_level += 1 - cls.execution_count += 1 - cls.execution_funcs.add(execution.base) - cls.parent_execution_funcs.append(execution.base) - - if cls.execution_count > settings.max_executions: + def __init__(self, evaluator): + self._evaluator = evaluator + + self._recursion_level = 0 + self._parent_execution_funcs = [] + self._funcdef_execution_counts = {} + self._execution_count = 0 + + def pop_execution(self): + self._parent_execution_funcs.pop() + self._recursion_level -= 1 + + def push_execution(self, execution): + funcdef = execution.tree_node + + # These two will be undone in pop_execution. + self._recursion_level += 1 + self._parent_execution_funcs.append(funcdef) + + module = execution.get_root_context() + if module == self._evaluator.BUILTINS: + # We have control over builtins so we know they are not recursing + # like crazy. Therefore we just let them execute always, because + # they usually just help a lot with getting good results. + return False + + if self._recursion_level > recursion_limit: return True - if isinstance(execution.base, (iterable.Array, iterable.Generator)): - return False - module = execution.get_parent_until() - if module == compiled.builtin: - return False + if self._execution_count >= total_function_execution_limit: + return True + self._execution_count += 1 - if in_par_execution_funcs: - if cls.recursion_level > settings.max_function_recursion_level: - return True - if in_execution_funcs and \ - len(cls.execution_funcs) > settings.max_until_execution_unique: + if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit: return True - if cls.execution_count > settings.max_executions_without_builtins: + self._funcdef_execution_counts[funcdef] += 1 + + if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit: return True return False diff --git a/pythonFiles/release/jedi/evaluate/representation.py b/pythonFiles/release/jedi/evaluate/representation.py deleted file mode 100755 index 3cfcaa9b5564..000000000000 --- a/pythonFiles/release/jedi/evaluate/representation.py +++ /dev/null @@ -1,857 +0,0 @@ -""" -Like described in the :mod:`jedi.parser.tree` module, -there's a need for an ast like module to represent the states of parsed -modules. - -But now there are also structures in Python that need a little bit more than -that. An ``Instance`` for example is only a ``Class`` before it is -instantiated. This class represents these cases. - -So, why is there also a ``Class`` class here? Well, there are decorators and -they change classes in Python 3. - -Representation modules also define "magic methods". Those methods look like -``py__foo__`` and are typically mappable to the Python equivalents ``__call__`` -and others. 
Here's a list: - -====================================== ======================================== -**Method** **Description** --------------------------------------- ---------------------------------------- -py__call__(evaluator, params: Array) On callable objects, returns types. -py__bool__() Returns True/False/None; None means that - there's no certainty. -py__bases__(evaluator) Returns a list of base classes. -py__mro__(evaluator) Returns a list of classes (the mro). -py__getattribute__(evaluator, name) Returns a list of attribute values. The - name can be str or Name. -====================================== ======================================== - -__ -""" -import os -import pkgutil -import imp -import re -from itertools import chain - -from jedi._compatibility import use_metaclass, unicode, Python3Method -from jedi.parser import tree -from jedi import debug -from jedi import common -from jedi.cache import underscore_memoization, cache_star_import -from jedi.evaluate.cache import memoize_default, CachedMetaClass, NO_DEFAULT -from jedi.evaluate import compiled -from jedi.evaluate import recursion -from jedi.evaluate import iterable -from jedi.evaluate import docstrings -from jedi.evaluate import helpers -from jedi.evaluate import param -from jedi.evaluate import flow_analysis -from jedi.evaluate import imports - - -class Executed(tree.Base): - """ - An instance is also an executable - because __init__ is called - :param var_args: The param input array, consist of a parser node or a list. - """ - def __init__(self, evaluator, base, var_args=()): - self._evaluator = evaluator - self.base = base - self.var_args = var_args - - def is_scope(self): - return True - - def get_parent_until(self, *args, **kwargs): - return tree.Base.get_parent_until(self, *args, **kwargs) - - @common.safe_property - def parent(self): - return self.base.parent - - -class Instance(use_metaclass(CachedMetaClass, Executed)): - """ - This class is used to evaluate instances. - """ - def __init__(self, evaluator, base, var_args, is_generated=False): - super(Instance, self).__init__(evaluator, base, var_args) - self.decorates = None - # Generated instances are classes that are just generated by self - # (No var_args) used. - self.is_generated = is_generated - - if base.name.get_code() in ['list', 'set'] \ - and compiled.builtin == base.get_parent_until(): - # compare the module path with the builtin name. - self.var_args = iterable.check_array_instances(evaluator, self) - elif not is_generated: - # Need to execute the __init__ function, because the dynamic param - # searching needs it. - try: - method = self.get_subscope_by_name('__init__') - except KeyError: - pass - else: - evaluator.execute(method, self.var_args) - - @property - def py__call__(self): - def actual(evaluator, params): - return evaluator.execute(method, params) - - try: - method = self.get_subscope_by_name('__call__') - except KeyError: - # Means the Instance is not callable. - raise AttributeError - - return actual - - def py__class__(self, evaluator): - return self.base - - def py__bool__(self): - # Signalize that we don't know about the bool type. - return None - - @memoize_default() - def _get_method_execution(self, func): - func = get_instance_el(self._evaluator, self, func, True) - return FunctionExecution(self._evaluator, func, self.var_args) - - def _get_func_self_name(self, func): - """ - Returns the name of the first param in a class method (which is - normally self. 
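# A minimal sketch (not part of the patch) of the convention the removed
# helper here relies on: the first parameter of an ordinary method receives
# the instance and is conventionally named 'self'.
import inspect

class Sketch(object):
    def method(self):
        return self

assert list(inspect.signature(Sketch.method).parameters)[0] == 'self'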
- """ - try: - return str(func.params[0].name) - except IndexError: - return None - - def _self_names_dict(self, add_mro=True): - names = {} - # This loop adds the names of the self object, copies them and removes - # the self. - for sub in self.base.subscopes: - if isinstance(sub, tree.Class): - continue - # Get the self name, if there's one. - self_name = self._get_func_self_name(sub) - if self_name is None: - continue - - if sub.name.value == '__init__' and not self.is_generated: - # ``__init__`` is special because the params need are injected - # this way. Therefore an execution is necessary. - if not sub.get_decorators(): - # __init__ decorators should generally just be ignored, - # because to follow them and their self variables is too - # complicated. - sub = self._get_method_execution(sub) - for name_list in sub.names_dict.values(): - for name in name_list: - if name.value == self_name and name.prev_sibling() is None: - trailer = name.next_sibling() - if tree.is_node(trailer, 'trailer') \ - and len(trailer.children) == 2 \ - and trailer.children[0] == '.': - name = trailer.children[1] # After dot. - if name.is_definition(): - arr = names.setdefault(name.value, []) - arr.append(get_instance_el(self._evaluator, self, name)) - return names - - def get_subscope_by_name(self, name): - sub = self.base.get_subscope_by_name(name) - return get_instance_el(self._evaluator, self, sub, True) - - def execute_subscope_by_name(self, name, *args): - method = self.get_subscope_by_name(name) - return self._evaluator.execute_evaluated(method, *args) - - def get_descriptor_returns(self, obj): - """ Throws a KeyError if there's no method. """ - # Arguments in __get__ descriptors are obj, class. - # `method` is the new parent of the array, don't know if that's good. - args = [obj, obj.base] if isinstance(obj, Instance) else [compiled.none_obj, obj] - try: - return self.execute_subscope_by_name('__get__', *args) - except KeyError: - return [self] - - @memoize_default() - def names_dicts(self, search_global): - yield self._self_names_dict() - - for s in self.base.py__mro__(self._evaluator)[1:]: - if not isinstance(s, compiled.CompiledObject): - # Compiled objects don't have `self.` names. - for inst in self._evaluator.execute(s): - yield inst._self_names_dict(add_mro=False) - - for names_dict in self.base.names_dicts(search_global=False, is_instance=True): - yield LazyInstanceDict(self._evaluator, self, names_dict) - - def get_index_types(self, evaluator, index_array): - indexes = iterable.create_indexes_or_slices(self._evaluator, index_array) - if any([isinstance(i, iterable.Slice) for i in indexes]): - # Slice support in Jedi is very marginal, at the moment, so just - # ignore them in case of __getitem__. - # TODO support slices in a more general way. - indexes = [] - - try: - method = self.get_subscope_by_name('__getitem__') - except KeyError: - debug.warning('No __getitem__, cannot access the array.') - return [] - else: - return self._evaluator.execute(method, [iterable.AlreadyEvaluated(indexes)]) - - @property - @underscore_memoization - def name(self): - name = self.base.name - return helpers.FakeName(unicode(name), self, name.start_pos) - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'get_imports', 'type', - 'doc', 'raw_doc']: - raise AttributeError("Instance %s: Don't touch this (%s)!" 
- % (self, name)) - return getattr(self.base, name) - - def __repr__(self): - dec = '' - if self.decorates is not None: - dec = " decorates " + repr(self.decorates) - return "" % (type(self).__name__, self.base, - self.var_args, dec) - - -class LazyInstanceDict(object): - def __init__(self, evaluator, instance, dct): - self._evaluator = evaluator - self._instance = instance - self._dct = dct - - def __getitem__(self, name): - return [get_instance_el(self._evaluator, self._instance, var, True) - for var in self._dct[name]] - - def values(self): - return [self[key] for key in self._dct] - - -class InstanceName(tree.Name): - def __init__(self, origin_name, parent): - super(InstanceName, self).__init__(tree.zero_position_modifier, - origin_name.value, - origin_name.start_pos) - self._origin_name = origin_name - self.parent = parent - - def is_definition(self): - return self._origin_name.is_definition() - - -def get_instance_el(evaluator, instance, var, is_class_var=False): - """ - Returns an InstanceElement if it makes sense, otherwise leaves the object - untouched. - - Basically having an InstanceElement is context information. That is needed - in quite a lot of cases, which includes Nodes like ``power``, that need to - know where a self name comes from for example. - """ - if isinstance(var, tree.Name): - parent = get_instance_el(evaluator, instance, var.parent, is_class_var) - return InstanceName(var, parent) - elif var.type != 'funcdef' \ - and isinstance(var, (Instance, compiled.CompiledObject, tree.Leaf, - tree.Module, FunctionExecution)): - return var - - var = evaluator.wrap(var) - return InstanceElement(evaluator, instance, var, is_class_var) - - -class InstanceElement(use_metaclass(CachedMetaClass, tree.Base)): - """ - InstanceElement is a wrapper for any object, that is used as an instance - variable (e.g. self.variable or class methods). - """ - def __init__(self, evaluator, instance, var, is_class_var): - self._evaluator = evaluator - self.instance = instance - self.var = var - self.is_class_var = is_class_var - - @common.safe_property - @memoize_default() - def parent(self): - par = self.var.parent - if isinstance(par, Class) and par == self.instance.base \ - or isinstance(par, tree.Class) \ - and par == self.instance.base.base: - par = self.instance - else: - par = get_instance_el(self._evaluator, self.instance, par, - self.is_class_var) - return par - - def get_parent_until(self, *args, **kwargs): - return tree.BaseNode.get_parent_until(self, *args, **kwargs) - - def get_definition(self): - return self.get_parent_until((tree.ExprStmt, tree.IsScope, tree.Import)) - - def get_decorated_func(self): - """ Needed because the InstanceElement should not be stripped """ - func = self.var.get_decorated_func() - func = get_instance_el(self._evaluator, self.instance, func) - return func - - def get_rhs(self): - return get_instance_el(self._evaluator, self.instance, - self.var.get_rhs(), self.is_class_var) - - def is_definition(self): - return self.var.is_definition() - - @property - def children(self): - # Copy and modify the array. 
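# A minimal sketch (not part of the patch) of the delegation pattern the
# removed Instance and InstanceElement wrappers above are built on:
# attribute lookups the wrapper does not define itself fall through to the
# wrapped object via __getattr__.
class Delegate(object):
    def __init__(self, wrapped):
        self._wrapped = wrapped

    def __getattr__(self, name):
        return getattr(self._wrapped, name)

assert Delegate([1, 1, 2]).count(1) == 2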
- return [get_instance_el(self._evaluator, self.instance, command, self.is_class_var) - for command in self.var.children] - - @property - @memoize_default() - def name(self): - name = self.var.name - return helpers.FakeName(unicode(name), self, name.start_pos) - - def __iter__(self): - for el in self.var.__iter__(): - yield get_instance_el(self._evaluator, self.instance, el, - self.is_class_var) - - def __getitem__(self, index): - return get_instance_el(self._evaluator, self.instance, self.var[index], - self.is_class_var) - - def __getattr__(self, name): - return getattr(self.var, name) - - def isinstance(self, *cls): - return isinstance(self.var, cls) - - def is_scope(self): - """ - Since we inherit from Base, it would overwrite the action we want here. - """ - return self.var.is_scope() - - def py__call__(self, evaluator, params): - if isinstance(self.var, compiled.CompiledObject): - # This check is a bit strange, but CompiledObject itself is a bit - # more complicated than we would it actually like to be. - return self.var.py__call__(evaluator, params) - else: - return Function.py__call__(self, evaluator, params) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.var) - - -class Wrapper(tree.Base): - def is_scope(self): - return True - - def is_class(self): - return False - - def py__bool__(self): - """ - Since Wrapper is a super class for classes, functions and modules, - the return value will always be true. - """ - return True - - @property - @underscore_memoization - def name(self): - name = self.base.name - return helpers.FakeName(unicode(name), self, name.start_pos) - - -class Class(use_metaclass(CachedMetaClass, Wrapper)): - """ - This class is not only important to extend `tree.Class`, it is also a - important for descriptors (if the descriptor methods are evaluated or not). - """ - def __init__(self, evaluator, base): - self._evaluator = evaluator - self.base = base - - @memoize_default(default=()) - def py__mro__(self, evaluator): - def add(cls): - if cls not in mro: - mro.append(cls) - - mro = [self] - # TODO Do a proper mro resolution. Currently we are just listing - # classes. However, it's a complicated algorithm. - for cls in self.py__bases__(self._evaluator): - # TODO detect for TypeError: duplicate base class str, - # e.g. 
`class X(str, str): pass` - try: - mro_method = cls.py__mro__ - except AttributeError: - # TODO add a TypeError like: - """ - >>> class Y(lambda: test): pass - Traceback (most recent call last): - File "", line 1, in - TypeError: function() argument 1 must be code, not str - >>> class Y(1): pass - Traceback (most recent call last): - File "", line 1, in - TypeError: int() takes at most 2 arguments (3 given) - """ - pass - else: - add(cls) - for cls_new in mro_method(evaluator): - add(cls_new) - return tuple(mro) - - @memoize_default(default=()) - def py__bases__(self, evaluator): - arglist = self.base.get_super_arglist() - if arglist: - args = param.Arguments(self._evaluator, arglist) - return list(chain.from_iterable(args.eval_args())) - else: - return [compiled.object_obj] - - def py__call__(self, evaluator, params): - return [Instance(evaluator, self, params)] - - def py__getattribute__(self, name): - return self._evaluator.find_types(self, name) - - @property - def params(self): - return self.get_subscope_by_name('__init__').params - - def names_dicts(self, search_global, is_instance=False): - if search_global: - yield self.names_dict - else: - for scope in self.py__mro__(self._evaluator): - if isinstance(scope, compiled.CompiledObject): - yield scope.names_dicts(False, is_instance)[0] - else: - yield scope.names_dict - - def is_class(self): - return True - - def get_subscope_by_name(self, name): - for s in self.py__mro__(self._evaluator): - for sub in reversed(s.subscopes): - if sub.name.value == name: - return sub - raise KeyError("Couldn't find subscope.") - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'parent', 'raw_doc', - 'doc', 'get_imports', 'get_parent_until', 'get_code', - 'subscopes', 'names_dict', 'type']: - raise AttributeError("Don't touch this: %s of %s !" % (name, self)) - return getattr(self.base, name) - - def __repr__(self): - return "" % (type(self).__name__, self.base) - - -class Function(use_metaclass(CachedMetaClass, Wrapper)): - """ - Needed because of decorators. Decorators are evaluated here. - """ - def __init__(self, evaluator, func, is_decorated=False): - """ This should not be called directly """ - self._evaluator = evaluator - self.base = self.base_func = func - self.is_decorated = is_decorated - # A property that is set by the decorator resolution. - self.decorates = None - - @memoize_default() - def get_decorated_func(self): - """ - Returns the function, that should to be executed in the end. - This is also the places where the decorators are processed. - """ - f = self.base_func - decorators = self.base_func.get_decorators() - - if not decorators or self.is_decorated: - return self - - # Only enter it, if has not already been processed. - if not self.is_decorated: - for dec in reversed(decorators): - debug.dbg('decorator: %s %s', dec, f) - dec_results = self._evaluator.eval_element(dec.children[1]) - trailer = dec.children[2:-1] - if trailer: - # Create a trailer and evaluate it. - trailer = tree.Node('trailer', trailer) - trailer.parent = dec - dec_results = self._evaluator.eval_trailer(dec_results, trailer) - - if not len(dec_results): - debug.warning('decorator not found: %s on %s', dec, self.base_func) - return self - decorator = dec_results.pop() - if dec_results: - debug.warning('multiple decorators found %s %s', - self.base_func, dec_results) - - # Create param array. - if isinstance(f, Function): - old_func = f # TODO this is just hacky. change. 
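# A plain-Python sketch (not part of the patch) of the behaviour the
# decorator resolution above re-creates statically: the decorated name is
# bound to whatever the decorator returns, so completions must follow the
# returned wrapper.
def deco(func):
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

@deco
def decorated():
    return 1

assert decorated() == 1
assert decorated.__name__ == 'wrapper'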
- else: - old_func = Function(self._evaluator, f, is_decorated=True) - - wrappers = self._evaluator.execute_evaluated(decorator, old_func) - if not len(wrappers): - debug.warning('no wrappers found %s', self.base_func) - return self - if len(wrappers) > 1: - # TODO resolve issue with multiple wrappers -> multiple types - debug.warning('multiple wrappers found %s %s', - self.base_func, wrappers) - f = wrappers[0] - if isinstance(f, (Instance, Function)): - f.decorates = self - - debug.dbg('decorator end %s', f) - return f - - def names_dicts(self, search_global): - if search_global: - yield self.names_dict - else: - for names_dict in compiled.magic_function_class.names_dicts(False): - yield names_dict - - @Python3Method - def py__call__(self, evaluator, params): - if self.base.is_generator(): - return [iterable.Generator(evaluator, self, params)] - else: - return FunctionExecution(evaluator, self, params).get_return_types() - - def __getattr__(self, name): - return getattr(self.base_func, name) - - def __repr__(self): - dec = '' - if self.decorates is not None: - dec = " decorates " + repr(self.decorates) - return "" % (type(self).__name__, self.base_func, dec) - - -class LambdaWrapper(Function): - def get_decorated_func(self): - return self - - -class FunctionExecution(Executed): - """ - This class is used to evaluate functions and their returns. - - This is the most complicated class, because it contains the logic to - transfer parameters. It is even more complicated, because there may be - multiple calls to functions and recursion has to be avoided. But this is - responsibility of the decorators. - """ - type = 'funcdef' - - def __init__(self, evaluator, base, *args, **kwargs): - super(FunctionExecution, self).__init__(evaluator, base, *args, **kwargs) - self._copy_dict = {} - new_func = helpers.deep_ast_copy(base.base_func, self, self._copy_dict) - self.children = new_func.children - self.names_dict = new_func.names_dict - - @memoize_default(default=()) - @recursion.execution_recursion_decorator - def get_return_types(self, check_yields=False): - func = self.base - - if func.isinstance(LambdaWrapper): - return self._evaluator.eval_element(self.children[-1]) - - if func.listeners: - # Feed the listeners, with the params. - for listener in func.listeners: - listener.execute(self._get_params()) - # If we do have listeners, that means that there's not a regular - # execution ongoing. In this case Jedi is interested in the - # inserted params, not in the actual execution of the function. - return [] - - if check_yields: - types = [] - returns = self.yields - else: - returns = self.returns - types = list(docstrings.find_return_types(self._evaluator, func)) - - for r in returns: - check = flow_analysis.break_check(self._evaluator, self, r) - if check is flow_analysis.UNREACHABLE: - debug.dbg('Return unreachable: %s', r) - else: - types += self._evaluator.eval_element(r.children[1]) - if check is flow_analysis.REACHABLE: - debug.dbg('Return reachable: %s', r) - break - return types - - def names_dicts(self, search_global): - yield self.names_dict - - @memoize_default(default=NO_DEFAULT) - def _get_params(self): - """ - This returns the params for an TODO and is injected as a - 'hack' into the tree.Function class. - This needs to be here, because Instance can have __init__ functions, - which act the same way as normal functions. 
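# A short sketch (not part of the patch) of the distinction that
# get_return_types(check_yields=...) above draws: a single 'yield' makes a
# call produce a generator instead of the returned value.
import types

def gen():
    yield 1

def ret():
    return 1

assert isinstance(gen(), types.GeneratorType)
assert ret() == 1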
- """ - return param.get_params(self._evaluator, self.base, self.var_args) - - def param_by_name(self, name): - return [n for n in self._get_params() if str(n) == name][0] - - def name_for_position(self, position): - return tree.Function.name_for_position(self, position) - - def _copy_list(self, lst): - """ - Copies a list attribute of a parser Function. Copying is very - expensive, because it is something like `copy.deepcopy`. However, these - copied objects can be used for the executions, as if they were in the - execution. - """ - objects = [] - for element in lst: - self._scope_copy(element.parent) - copied = helpers.deep_ast_copy(element, self._copy_dict) - objects.append(copied) - return objects - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'imports', 'name', 'type']: - raise AttributeError('Tried to access %s: %s. Why?' % (name, self)) - return getattr(self.base, name) - - def _scope_copy(self, scope): - raise NotImplementedError - """ Copies a scope (e.g. `if foo:`) in an execution """ - if scope != self.base.base_func: - # Just make sure the parents been copied. - self._scope_copy(scope.parent) - helpers.deep_ast_copy(scope, self._copy_dict) - - @common.safe_property - @memoize_default([]) - def returns(self): - return tree.Scope._search_in_scope(self, tree.ReturnStmt) - - @common.safe_property - @memoize_default([]) - def yields(self): - return tree.Scope._search_in_scope(self, tree.YieldExpr) - - @common.safe_property - @memoize_default([]) - def statements(self): - return tree.Scope._search_in_scope(self, tree.ExprStmt) - - @common.safe_property - @memoize_default([]) - def subscopes(self): - return tree.Scope._search_in_scope(self, tree.Scope) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.base) - - -class GlobalName(helpers.FakeName): - def __init__(self, name): - """ - We need to mark global names somehow. Otherwise they are just normal - names that are not definitions. - """ - super(GlobalName, self).__init__(name.value, name.parent, - name.start_pos, is_definition=True) - - -class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)): - def __init__(self, evaluator, module): - self._evaluator = evaluator - self.base = self._module = module - - def names_dicts(self, search_global): - yield self.base.names_dict - yield self._module_attributes_dict() - - for star_module in self.star_imports(): - yield star_module.names_dict - - yield dict((str(n), [GlobalName(n)]) for n in self.base.global_names) - yield self._sub_modules_dict() - - # I'm not sure if the star import cache is really that effective anymore - # with all the other really fast import caches. Recheck. Also we would need - # to push the star imports into Evaluator.modules, if we reenable this. - #@cache_star_import - @memoize_default([]) - def star_imports(self): - modules = [] - for i in self.base.imports: - if i.is_star_import(): - name = i.star_import_name() - new = imports.ImportWrapper(self._evaluator, name).follow() - for module in new: - if isinstance(module, tree.Module): - modules += module.star_imports() - modules += new - return modules - - @memoize_default() - def _module_attributes_dict(self): - def parent_callback(): - return self._evaluator.execute(compiled.create(self._evaluator, str))[0] - - names = ['__file__', '__package__', '__doc__', '__name__'] - # All the additional module attributes are strings. 
- return dict((n, [helpers.LazyName(n, parent_callback, is_definition=True)]) - for n in names) - - @property - @memoize_default() - def name(self): - return helpers.FakeName(unicode(self.base.name), self, (1, 0)) - - def _get_init_directory(self): - for suffix, _, _ in imp.get_suffixes(): - ending = '__init__' + suffix - if self.py__file__().endswith(ending): - # Remove the ending, including the separator. - return self.py__file__()[:-len(ending) - 1] - return None - - def py__name__(self): - for name, module in self._evaluator.modules.items(): - if module == self: - return name - - return '__main__' - - def py__file__(self): - """ - In contrast to Python's __file__ can be None. - """ - if self._module.path is None: - return None - - return os.path.abspath(self._module.path) - - def py__package__(self): - if self._get_init_directory() is None: - return re.sub(r'\.?[^\.]+$', '', self.py__name__()) - else: - return self.py__name__() - - @property - def py__path__(self): - """ - Not seen here, since it's a property. The callback actually uses a - variable, so use it like:: - - foo.py__path__(sys_path) - - In case of a package, this returns Python's __path__ attribute, which - is a list of paths (strings). - Raises an AttributeError if the module is not a package. - """ - def return_value(search_path): - init_path = self.py__file__() - if os.path.basename(init_path) == '__init__.py': - - with open(init_path, 'rb') as f: - content = common.source_to_unicode(f.read()) - # these are strings that need to be used for namespace packages, - # the first one is ``pkgutil``, the second ``pkg_resources``. - options = ('declare_namespace(__name__)', 'extend_path(__path__') - if options[0] in content or options[1] in content: - # It is a namespace, now try to find the rest of the - # modules on sys_path or whatever the search_path is. - paths = set() - for s in search_path: - other = os.path.join(s, unicode(self.name)) - if os.path.isdir(other): - paths.add(other) - return list(paths) - # Default to this. - return [path] - - path = self._get_init_directory() - - if path is None: - raise AttributeError('Only packages have __path__ attributes.') - else: - return return_value - - @memoize_default() - def _sub_modules_dict(self): - """ - Lists modules in the directory of this module (if this module is a - package). - """ - path = self._module.path - names = {} - if path is not None and path.endswith(os.path.sep + '__init__.py'): - mods = pkgutil.iter_modules([os.path.dirname(path)]) - for module_loader, name, is_pkg in mods: - fake_n = helpers.FakeName(name) - # It's obviously a relative import to the current module. - imp = helpers.FakeImport(fake_n, self, level=1) - fake_n.parent = imp - names[name] = [fake_n] - - # TODO add something like this in the future, its cleaner than the - # import hacks. - # ``os.path`` is a hardcoded exception, because it's a - # ``sys.modules`` modification. - #if str(self.name) == 'os': - # names.append(helpers.FakeName('path', parent=self)) - - return names - - def __getattr__(self, name): - return getattr(self._module, name) - - def __repr__(self): - return "<%s: %s>" % (type(self).__name__, self._module) diff --git a/pythonFiles/release/jedi/evaluate/site.py b/pythonFiles/release/jedi/evaluate/site.py new file mode 100644 index 000000000000..bf884faefaaf --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/site.py @@ -0,0 +1,110 @@ +"""An adapted copy of relevant site-packages functionality from Python stdlib. 
+ +This file contains some functions related to handling site-packages in Python +with jedi-specific modifications: + +- the functions operate on sys_path argument rather than global sys.path + +- in .pth files "import ..." lines that allow execution of arbitrary code are + skipped to prevent code injection into jedi interpreter + +""" + +# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved + +from __future__ import print_function + +import sys +import os + + +def makepath(*paths): + dir = os.path.join(*paths) + try: + dir = os.path.abspath(dir) + except OSError: + pass + return dir, os.path.normcase(dir) + + +def _init_pathinfo(sys_path): + """Return a set containing all existing directory entries from sys_path""" + d = set() + for dir in sys_path: + try: + if os.path.isdir(dir): + dir, dircase = makepath(dir) + d.add(dircase) + except TypeError: + continue + return d + + +def addpackage(sys_path, sitedir, name, known_paths): + """Process a .pth file within the site-packages directory: + For each line in the file, either combine it with sitedir to a path + and add that to known_paths, or execute it if it starts with 'import '. + """ + if known_paths is None: + known_paths = _init_pathinfo(sys_path) + reset = 1 + else: + reset = 0 + fullname = os.path.join(sitedir, name) + try: + f = open(fullname, "r") + except OSError: + return + with f: + for n, line in enumerate(f): + if line.startswith("#"): + continue + try: + if line.startswith(("import ", "import\t")): + # Change by immerrr: don't evaluate import lines to prevent + # code injection into jedi through pth files. + # + # exec(line) + continue + line = line.rstrip() + dir, dircase = makepath(sitedir, line) + if not dircase in known_paths and os.path.exists(dir): + sys_path.append(dir) + known_paths.add(dircase) + except Exception: + print("Error processing line {:d} of {}:\n".format(n+1, fullname), + file=sys.stderr) + import traceback + for record in traceback.format_exception(*sys.exc_info()): + for line in record.splitlines(): + print(' '+line, file=sys.stderr) + print("\nRemainder of file ignored", file=sys.stderr) + break + if reset: + known_paths = None + return known_paths + + +def addsitedir(sys_path, sitedir, known_paths=None): + """Add 'sitedir' argument to sys_path if missing and handle .pth files in + 'sitedir'""" + if known_paths is None: + known_paths = _init_pathinfo(sys_path) + reset = 1 + else: + reset = 0 + sitedir, sitedircase = makepath(sitedir) + if not sitedircase in known_paths: + sys_path.append(sitedir) # Add path component + known_paths.add(sitedircase) + try: + names = os.listdir(sitedir) + except OSError: + return + names = [name for name in names if name.endswith(".pth")] + for name in sorted(names): + addpackage(sys_path, sitedir, name, known_paths) + if reset: + known_paths = None + return known_paths diff --git a/pythonFiles/release/jedi/evaluate/stdlib.py b/pythonFiles/release/jedi/evaluate/stdlib.py index 20ff5d51aa86..259d692733db 100755 --- a/pythonFiles/release/jedi/evaluate/stdlib.py +++ b/pythonFiles/release/jedi/evaluate/stdlib.py @@ -5,56 +5,85 @@ To add a new implementation, create a function and add it to the ``_implemented`` dict at the bottom of this module. +Note that this module exists only to implement very specific functionality in +the standard library. The usual way to understand the standard library is the +compiled module that returns the types for C-builtins. 
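
As a rough illustration (an aside, not upstream text): addpackage() in the
site.py copy above treats each .pth line roughly like this, with a
hypothetical site dir::

    import os
    sitedir = '/venv/lib/python3.6/site-packages'  # hypothetical
    for line in ('# a comment', 'import evil', 'extra/dir'):
        if line.startswith('#') or line.startswith(('import ', 'import\t')):
            continue            # comments skipped, import lines ignored
        print(os.path.join(sitedir, line))  # appended as a path entry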
""" import collections import re -from jedi._compatibility import unicode -from jedi.evaluate import compiled -from jedi.evaluate import representation as er -from jedi.evaluate import iterable -from jedi.parser import Parser -from jedi.parser import tree from jedi import debug -from jedi.evaluate import precedence -from jedi.evaluate import param +from jedi.evaluate.arguments import ValuesArguments +from jedi.evaluate import analysis +from jedi.evaluate import compiled +from jedi.evaluate.context.instance import InstanceFunctionExecution, \ + AbstractInstanceContext, CompiledInstance, BoundMethod, \ + AnonymousInstanceFunctionExecution +from jedi.evaluate.base_context import ContextualizedNode, \ + NO_CONTEXTS, ContextSet +from jedi.evaluate.context import ClassContext, ModuleContext +from jedi.evaluate.context import iterable +from jedi.evaluate.lazy_context import LazyTreeContext +from jedi.evaluate.syntax_tree import is_string + +# Now this is all part of fake tuples in Jedi. However super doesn't work on +# __init__ and __new__ doesn't work at all. So adding this to nametuples is +# just the easiest way. +_NAMEDTUPLE_INIT = """ + def __init__(_cls, {arg_list}): + 'A helper function for namedtuple.' + self.__iterable = ({arg_list}) + + def __iter__(self): + for i in self.__iterable: + yield i + + def __getitem__(self, y): + return self.__iterable[y] + +""" class NotInStdLib(LookupError): pass -def execute(evaluator, obj, params): +def execute(evaluator, obj, arguments): + if isinstance(obj, BoundMethod): + raise NotInStdLib() + try: - obj_name = str(obj.name) + obj_name = obj.name.string_name except AttributeError: pass else: - if obj.parent == compiled.builtin: + if obj.parent_context == evaluator.BUILTINS: module_name = 'builtins' - elif isinstance(obj.parent, tree.Module): - module_name = str(obj.parent.name) + elif isinstance(obj.parent_context, ModuleContext): + module_name = obj.parent_context.name.string_name else: module_name = '' # for now we just support builtin functions. try: - return _implemented[module_name][obj_name](evaluator, obj, params) + func = _implemented[module_name][obj_name] except KeyError: pass + else: + return func(evaluator, obj, arguments) raise NotInStdLib() -def _follow_param(evaluator, params, index): +def _follow_param(evaluator, arguments, index): try: - key, values = list(params.unpack())[index] + key, lazy_context = list(arguments.unpack())[index] except IndexError: - return [] + return NO_CONTEXTS else: - return iterable.unite(evaluator.eval_element(v) for v in values) + return lazy_context.infer() -def argument_clinic(string, want_obj=False, want_scope=False): +def argument_clinic(string, want_obj=False, want_context=False, want_arguments=False): """ Works like Argument Clinic (PEP 436), to validate function params. 
""" @@ -77,116 +106,150 @@ def argument_clinic(string, want_obj=False, want_scope=False): def f(func): def wrapper(evaluator, obj, arguments): + debug.dbg('builtin start %s' % obj, color='MAGENTA') try: lst = list(arguments.eval_argument_clinic(clinic_args)) except ValueError: - return [] + return NO_CONTEXTS else: kwargs = {} - if want_scope: - kwargs['scope'] = arguments.scope() + if want_context: + kwargs['context'] = arguments.context if want_obj: kwargs['obj'] = obj + if want_arguments: + kwargs['arguments'] = arguments return func(evaluator, *lst, **kwargs) + finally: + debug.dbg('builtin end', color='MAGENTA') return wrapper return f +@argument_clinic('iterator[, default], /') +def builtins_next(evaluator, iterators, defaults): + """ + TODO this function is currently not used. It's a stab at implementing next + in a different way than fake objects. This would be a bit more flexible. + """ + if evaluator.python_version[0] == 2: + name = 'next' + else: + name = '__next__' + + context_set = NO_CONTEXTS + for iterator in iterators: + if isinstance(iterator, AbstractInstanceContext): + context_set = ContextSet.from_sets( + n.infer() + for filter in iterator.get_filters(include_self_names=True) + for n in filter.get(name) + ).execute_evaluated() + if context_set: + return context_set + return defaults + + @argument_clinic('object, name[, default], /') def builtins_getattr(evaluator, objects, names, defaults=None): - types = [] # follow the first param for obj in objects: - if not isinstance(obj, (er.Instance, er.Class, tree.Module, compiled.CompiledObject)): - debug.warning('getattr called without instance') - continue - for name in names: - if precedence.is_string(name): - return evaluator.find_types(obj, name.obj) + if is_string(name): + return obj.py__getattribute__(name.obj) else: debug.warning('getattr called without str') continue - return types + return NO_CONTEXTS @argument_clinic('object[, bases, dict], /') def builtins_type(evaluator, objects, bases, dicts): if bases or dicts: - # metaclass... maybe someday... - return [] + # It's a type creation... maybe someday... 
+ return NO_CONTEXTS else: - return [o.base for o in objects if isinstance(o, er.Instance)] + return objects.py__class__() -class SuperInstance(er.Instance): +class SuperInstance(AbstractInstanceContext): """To be used like the object ``super`` returns.""" def __init__(self, evaluator, cls): su = cls.py_mro()[1] super().__init__(evaluator, su and su[0] or self) -@argument_clinic('[type[, obj]], /', want_scope=True) -def builtins_super(evaluator, types, objects, scope): +@argument_clinic('[type[, obj]], /', want_context=True) +def builtins_super(evaluator, types, objects, context): # TODO make this able to detect multiple inheritance super - accept = (tree.Function, er.FunctionExecution) - if scope.isinstance(*accept): - wanted = (tree.Class, er.Instance) - cls = scope.get_parent_until(accept + wanted, - include_current=False) - if isinstance(cls, wanted): - if isinstance(cls, tree.Class): - cls = er.Class(evaluator, cls) - elif isinstance(cls, er.Instance): - cls = cls.base - su = cls.py__bases__(evaluator) - if su: - return evaluator.execute(su[0]) - return [] - - -@argument_clinic('sequence, /', want_obj=True) -def builtins_reversed(evaluator, sequences, obj): - # Unpack the iterator values - objects = tuple(iterable.get_iterator_types(sequences)) - rev = [iterable.AlreadyEvaluated([o]) for o in reversed(objects)] + if isinstance(context, (InstanceFunctionExecution, + AnonymousInstanceFunctionExecution)): + su = context.instance.py__class__().py__bases__() + return su[0].infer().execute_evaluated() + return NO_CONTEXTS + + +@argument_clinic('sequence, /', want_obj=True, want_arguments=True) +def builtins_reversed(evaluator, sequences, obj, arguments): + # While we could do without this variable (just by using sequences), we + # want static analysis to work well. Therefore we need to generated the + # values again. + key, lazy_context = next(arguments.unpack()) + cn = None + if isinstance(lazy_context, LazyTreeContext): + # TODO access private + cn = ContextualizedNode(lazy_context._context, lazy_context.data) + ordered = list(sequences.iterate(cn)) + + rev = list(reversed(ordered)) # Repack iterator values and then run it the normal way. This is # necessary, because `reversed` is a function and autocompletion # would fail in certain cases like `reversed(x).__iter__` if we # just returned the result directly. - rev = iterable.AlreadyEvaluated( - [iterable.FakeSequence(evaluator, rev, 'list')] - ) - return [er.Instance(evaluator, obj, param.Arguments(evaluator, [rev]))] + seq = iterable.FakeSequence(evaluator, 'list', rev) + arguments = ValuesArguments([ContextSet(seq)]) + return ContextSet(CompiledInstance(evaluator, evaluator.BUILTINS, obj, arguments)) -@argument_clinic('obj, type, /') -def builtins_isinstance(evaluator, objects, types): - bool_results = set([]) +@argument_clinic('obj, type, /', want_arguments=True) +def builtins_isinstance(evaluator, objects, types, arguments): + bool_results = set() for o in objects: try: - mro_func = o.py__class__(evaluator).py__mro__ + mro_func = o.py__class__().py__mro__ except AttributeError: # This is temporary. Everything should have a class attribute in # Python?! Maybe we'll leave it here, because some numpy objects or # whatever might not. 
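+        # (NO_CONTEXTS is the empty ContextSet, i.e. "nothing could be inferred")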
- return [compiled.true_obj, compiled.false_obj] + return ContextSet(compiled.create(True), compiled.create(False)) - mro = mro_func(evaluator) + mro = mro_func() for cls_or_tup in types: if cls_or_tup.is_class(): bool_results.add(cls_or_tup in mro) - else: + elif cls_or_tup.name.string_name == 'tuple' \ + and cls_or_tup.get_root_context() == evaluator.BUILTINS: # Check for tuples. - classes = iterable.get_iterator_types([cls_or_tup]) + classes = ContextSet.from_sets( + lazy_context.infer() + for lazy_context in cls_or_tup.iterate() + ) bool_results.add(any(cls in mro for cls in classes)) + else: + _, lazy_context = list(arguments.unpack())[1] + if isinstance(lazy_context, LazyTreeContext): + node = lazy_context.data + message = 'TypeError: isinstance() arg 2 must be a ' \ + 'class, type, or tuple of classes and types, ' \ + 'not %s.' % cls_or_tup + analysis.add(lazy_context._context, 'type-error-isinstance', node, message) - return [compiled.keyword_from_value(x) for x in bool_results] + return ContextSet.from_iterable(compiled.create(evaluator, x) for x in bool_results) -def collections_namedtuple(evaluator, obj, params): +def collections_namedtuple(evaluator, obj, arguments): """ Implementation of the namedtuple function. @@ -198,35 +261,41 @@ def collections_namedtuple(evaluator, obj, params): """ # Namedtuples are not supported on Python 2.6 if not hasattr(collections, '_class_template'): - return [] + return NO_CONTEXTS # Process arguments - name = _follow_param(evaluator, params, 0)[0].obj - _fields = _follow_param(evaluator, params, 1)[0] + # TODO here we only use one of the types, we should use all. + name = list(_follow_param(evaluator, arguments, 0))[0].obj + _fields = list(_follow_param(evaluator, arguments, 1))[0] if isinstance(_fields, compiled.CompiledObject): fields = _fields.obj.replace(',', ' ').split() - elif isinstance(_fields, iterable.Array): - try: - fields = [v.obj for v in _fields.values()] - except AttributeError: - return [] + elif isinstance(_fields, iterable.AbstractIterable): + fields = [ + v.obj + for lazy_context in _fields.py__iter__() + for v in lazy_context.infer() if hasattr(v, 'obj') + ] else: - return [] + return NO_CONTEXTS + base = collections._class_template + base += _NAMEDTUPLE_INIT # Build source - source = collections._class_template.format( + source = base.format( typename=name, - field_names=fields, + field_names=tuple(fields), num_fields=len(fields), - arg_list=', '.join(fields), + arg_list = repr(tuple(fields)).replace("'", "")[1:-1], repr_fmt=', '.join(collections._repr_template.format(name=name) for name in fields), field_defs='\n'.join(collections._field_template.format(index=index, name=name) for index, name in enumerate(fields)) ) # Parse source - generated_class = Parser(evaluator.grammar, unicode(source)).module.subscopes[0] - return [er.Class(evaluator, generated_class)] + module = evaluator.grammar.parse(source) + generated_class = next(module.iter_classdefs()) + parent_context = ModuleContext(evaluator, module, '') + return ContextSet(ClassContext(evaluator, parent_context, generated_class)) @argument_clinic('first, /') @@ -247,8 +316,8 @@ def _return_first_param(evaluator, firsts): 'deepcopy': _return_first_param, }, 'json': { - 'load': lambda *args: [], - 'loads': lambda *args: [], + 'load': lambda *args: NO_CONTEXTS, + 'loads': lambda *args: NO_CONTEXTS, }, 'collections': { 'namedtuple': collections_namedtuple, diff --git a/pythonFiles/release/jedi/evaluate/syntax_tree.py b/pythonFiles/release/jedi/evaluate/syntax_tree.py new 
file mode 100644 index 000000000000..1d847a4960de --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/syntax_tree.py @@ -0,0 +1,588 @@ +""" +Functions evaluating the syntax tree. +""" +import copy +import operator as op + +from parso.python import tree + +from jedi import debug +from jedi import parser_utils +from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, ContextualizedNode, \ + ContextualizedName, iterator_to_context_set, iterate_contexts +from jedi.evaluate import compiled +from jedi.evaluate import pep0484 +from jedi.evaluate import recursion +from jedi.evaluate import helpers +from jedi.evaluate import analysis +from jedi.evaluate import imports +from jedi.evaluate import arguments +from jedi.evaluate.context import ClassContext, FunctionContext +from jedi.evaluate.context import iterable +from jedi.evaluate.context import TreeInstance, CompiledInstance +from jedi.evaluate.finder import NameFinder +from jedi.evaluate.helpers import is_string, is_literal, is_number, is_compiled + + +def _limit_context_infers(func): + """ + This is for now the way how we limit type inference going wild. There are + other ways to ensure recursion limits as well. This is mostly necessary + because of instance (self) access that can be quite tricky to limit. + + I'm still not sure this is the way to go, but it looks okay for now and we + can still go anther way in the future. Tests are there. ~ dave + """ + def wrapper(context, *args, **kwargs): + n = context.tree_node + evaluator = context.evaluator + try: + evaluator.inferred_element_counts[n] += 1 + if evaluator.inferred_element_counts[n] > 300: + debug.warning('In context %s there were too many inferences.', n) + return NO_CONTEXTS + except KeyError: + evaluator.inferred_element_counts[n] = 1 + return func(context, *args, **kwargs) + + return wrapper + + +@debug.increase_indent +@_limit_context_infers +def eval_node(context, element): + debug.dbg('eval_element %s@%s', element, element.start_pos) + evaluator = context.evaluator + typ = element.type + if typ in ('name', 'number', 'string', 'atom'): + return eval_atom(context, element) + elif typ == 'keyword': + # For False/True/None + if element.value in ('False', 'True', 'None'): + return ContextSet(compiled.builtin_from_name(evaluator, element.value)) + # else: print e.g. could be evaluated like this in Python 2.7 + return NO_CONTEXTS + elif typ == 'lambdef': + return ContextSet(FunctionContext(evaluator, context, element)) + elif typ == 'expr_stmt': + return eval_expr_stmt(context, element) + elif typ in ('power', 'atom_expr'): + first_child = element.children[0] + if not (first_child.type == 'keyword' and first_child.value == 'await'): + context_set = eval_atom(context, first_child) + for trailer in element.children[1:]: + if trailer == '**': # has a power operation. + right = evaluator.eval_element(context, element.children[2]) + context_set = _eval_comparison( + evaluator, + context, + context_set, + trailer, + right + ) + break + context_set = eval_trailer(context, context_set, trailer) + return context_set + return NO_CONTEXTS + elif typ in ('testlist_star_expr', 'testlist',): + # The implicit tuple in statements. + return ContextSet(iterable.SequenceLiteralContext(evaluator, context, element)) + elif typ in ('not_test', 'factor'): + context_set = context.eval_node(element.children[-1]) + for operator in element.children[:-1]: + context_set = eval_factor(context_set, operator) + return context_set + elif typ == 'test': + # `x if foo else y` case. 
+ return (context.eval_node(element.children[0]) | + context.eval_node(element.children[-1])) + elif typ == 'operator': + # Must be an ellipsis, other operators are not evaluated. + # In Python 2 ellipsis is coded as three single dot tokens, not + # as one token 3 dot token. + assert element.value in ('.', '...') + return ContextSet(compiled.create(evaluator, Ellipsis)) + elif typ == 'dotted_name': + context_set = eval_atom(context, element.children[0]) + for next_name in element.children[2::2]: + # TODO add search_global=True? + context_set = context_set.py__getattribute__(next_name, name_context=context) + return context_set + elif typ == 'eval_input': + return eval_node(context, element.children[0]) + elif typ == 'annassign': + return pep0484._evaluate_for_annotation(context, element.children[1]) + else: + return eval_or_test(context, element) + + +def eval_trailer(context, base_contexts, trailer): + trailer_op, node = trailer.children[:2] + if node == ')': # `arglist` is optional. + node = () + + if trailer_op == '[': + trailer_op, node, _ = trailer.children + + # TODO It's kind of stupid to cast this from a context set to a set. + foo = set(base_contexts) + # special case: PEP0484 typing module, see + # https://github.com/davidhalter/jedi/issues/663 + result = ContextSet() + for typ in list(foo): + if isinstance(typ, (ClassContext, TreeInstance)): + typing_module_types = pep0484.py__getitem__(context, typ, node) + if typing_module_types is not None: + foo.remove(typ) + result |= typing_module_types + + return result | base_contexts.get_item( + eval_subscript_list(context.evaluator, context, node), + ContextualizedNode(context, trailer) + ) + else: + debug.dbg('eval_trailer: %s in %s', trailer, base_contexts) + if trailer_op == '.': + return base_contexts.py__getattribute__( + name_context=context, + name_or_str=node + ) + else: + assert trailer_op == '(' + args = arguments.TreeArguments(context.evaluator, context, node, trailer) + return base_contexts.execute(args) + + +def eval_atom(context, atom): + """ + Basically to process ``atom`` nodes. The parser sometimes doesn't + generate the node (because it has just one child). In that case an atom + might be a name or a literal as well. + """ + if atom.type == 'name': + # This is the first global lookup. + stmt = tree.search_ancestor( + atom, 'expr_stmt', 'lambdef' + ) or atom + if stmt.type == 'lambdef': + stmt = atom + return context.py__getattribute__( + name_or_str=atom, + position=stmt.start_pos, + search_global=True + ) + + elif isinstance(atom, tree.Literal): + string = parser_utils.safe_literal_eval(atom.value) + return ContextSet(compiled.create(context.evaluator, string)) + else: + c = atom.children + if c[0].type == 'string': + # Will be one string. + context_set = eval_atom(context, c[0]) + for string in c[1:]: + right = eval_atom(context, string) + context_set = _eval_comparison(context.evaluator, context, context_set, '+', right) + return context_set + # Parentheses without commas are not tuples. + elif c[0] == '(' and not len(c) == 2 \ + and not(c[1].type == 'testlist_comp' and + len(c[1].children) > 1): + return context.eval_node(c[1]) + + try: + comp_for = c[1].children[1] + except (IndexError, AttributeError): + pass + else: + if comp_for == ':': + # Dict comprehensions have a colon at the 3rd index. 
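+                # the condition is not decided statically, so both branches count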
+ try: + comp_for = c[1].children[3] + except IndexError: + pass + + if comp_for.type == 'comp_for': + return ContextSet(iterable.Comprehension.from_atom(context.evaluator, context, atom)) + + # It's a dict/list/tuple literal. + array_node = c[1] + try: + array_node_c = array_node.children + except AttributeError: + array_node_c = [] + if c[0] == '{' and (array_node == '}' or ':' in array_node_c): + context = iterable.DictLiteralContext(context.evaluator, context, atom) + else: + context = iterable.SequenceLiteralContext(context.evaluator, context, atom) + return ContextSet(context) + + +@_limit_context_infers +def eval_expr_stmt(context, stmt, seek_name=None): + with recursion.execution_allowed(context.evaluator, stmt) as allowed: + if allowed or context.get_root_context() == context.evaluator.BUILTINS: + return _eval_expr_stmt(context, stmt, seek_name) + return NO_CONTEXTS + + +@debug.increase_indent +def _eval_expr_stmt(context, stmt, seek_name=None): + """ + The starting point of the completion. A statement always owns a call + list, which are the calls, that a statement does. In case multiple + names are defined in the statement, `seek_name` returns the result for + this name. + + :param stmt: A `tree.ExprStmt`. + """ + debug.dbg('eval_expr_stmt %s (%s)', stmt, seek_name) + rhs = stmt.get_rhs() + context_set = context.eval_node(rhs) + + if seek_name: + c_node = ContextualizedName(context, seek_name) + context_set = check_tuple_assignments(context.evaluator, c_node, context_set) + + first_operator = next(stmt.yield_operators(), None) + if first_operator not in ('=', None) and first_operator.type == 'operator': + # `=` is always the last character in aug assignments -> -1 + operator = copy.copy(first_operator) + operator.value = operator.value[:-1] + name = stmt.get_defined_names()[0].value + left = context.py__getattribute__( + name, position=stmt.start_pos, search_global=True) + + for_stmt = tree.search_ancestor(stmt, 'for_stmt') + if for_stmt is not None and for_stmt.type == 'for_stmt' and context_set \ + and parser_utils.for_stmt_defines_one_name(for_stmt): + # Iterate through result and add the values, that's possible + # only in for loops without clutter, because they are + # predictable. Also only do it, if the variable is not a tuple. + node = for_stmt.get_testlist() + cn = ContextualizedNode(context, node) + ordered = list(cn.infer().iterate(cn)) + + for lazy_context in ordered: + dct = {for_stmt.children[1].value: lazy_context.infer()} + with helpers.predefine_names(context, for_stmt, dct): + t = context.eval_node(rhs) + left = _eval_comparison(context.evaluator, context, left, operator, t) + context_set = left + else: + context_set = _eval_comparison(context.evaluator, context, left, operator, context_set) + debug.dbg('eval_expr_stmt result %s', context_set) + return context_set + + +def eval_or_test(context, or_test): + iterator = iter(or_test.children) + types = context.eval_node(next(iterator)) + for operator in iterator: + right = next(iterator) + if operator.type == 'comp_op': # not in / is not + operator = ' '.join(c.value for c in operator.children) + + # handle lazy evaluation of and/or here. + if operator in ('and', 'or'): + left_bools = set(left.py__bool__() for left in types) + if left_bools == set([True]): + if operator == 'and': + types = context.eval_node(right) + elif left_bools == set([False]): + if operator != 'and': + types = context.eval_node(right) + # Otherwise continue, because of uncertainty. 
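+                    # e.g. {key: value for ...} -> children [key, ':', value, comp_for]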
+ else: + types = _eval_comparison(context.evaluator, context, types, operator, + context.eval_node(right)) + debug.dbg('eval_or_test types %s', types) + return types + + +@iterator_to_context_set +def eval_factor(context_set, operator): + """ + Calculates `+`, `-`, `~` and `not` prefixes. + """ + for context in context_set: + if operator == '-': + if is_number(context): + yield compiled.create(context.evaluator, -context.obj) + elif operator == 'not': + value = context.py__bool__() + if value is None: # Uncertainty. + return + yield compiled.create(context.evaluator, not value) + else: + yield context + + +# Maps Python syntax to the operator module. +COMPARISON_OPERATORS = { + '==': op.eq, + '!=': op.ne, + 'is': op.is_, + 'is not': op.is_not, + '<': op.lt, + '<=': op.le, + '>': op.gt, + '>=': op.ge, +} + + +def _literals_to_types(evaluator, result): + # Changes literals ('a', 1, 1.0, etc) to its type instances (str(), + # int(), float(), etc). + new_result = NO_CONTEXTS + for typ in result: + if is_literal(typ): + # Literals are only valid as long as the operations are + # correct. Otherwise add a value-free instance. + cls = compiled.builtin_from_name(evaluator, typ.name.string_name) + new_result |= cls.execute_evaluated() + else: + new_result |= ContextSet(typ) + return new_result + + +def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts): + if not left_contexts or not right_contexts: + # illegal slices e.g. cause left/right_result to be None + result = (left_contexts or NO_CONTEXTS) | (right_contexts or NO_CONTEXTS) + return _literals_to_types(evaluator, result) + else: + # I don't think there's a reasonable chance that a string + # operation is still correct, once we pass something like six + # objects. + if len(left_contexts) * len(right_contexts) > 6: + return _literals_to_types(evaluator, left_contexts | right_contexts) + else: + return ContextSet.from_sets( + _eval_comparison_part(evaluator, context, left, operator, right) + for left in left_contexts + for right in right_contexts + ) + + +def _is_tuple(context): + return isinstance(context, iterable.AbstractIterable) and context.array_type == 'tuple' + + +def _is_list(context): + return isinstance(context, iterable.AbstractIterable) and context.array_type == 'list' + + +def _eval_comparison_part(evaluator, context, left, operator, right): + l_is_num = is_number(left) + r_is_num = is_number(right) + if operator == '*': + # for iterables, ignore * operations + if isinstance(left, iterable.AbstractIterable) or is_string(left): + return ContextSet(left) + elif isinstance(right, iterable.AbstractIterable) or is_string(right): + return ContextSet(right) + elif operator == '+': + if l_is_num and r_is_num or is_string(left) and is_string(right): + return ContextSet(compiled.create(evaluator, left.obj + right.obj)) + elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right): + return ContextSet(iterable.MergedArray(evaluator, (left, right))) + elif operator == '-': + if l_is_num and r_is_num: + return ContextSet(compiled.create(evaluator, left.obj - right.obj)) + elif operator == '%': + # With strings and numbers the left type typically remains. Except for + # `int() % float()`. + return ContextSet(left) + elif operator in COMPARISON_OPERATORS: + operation = COMPARISON_OPERATORS[operator] + if is_compiled(left) and is_compiled(right): + # Possible, because the return is not an option. Just compare. 
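+                # a real binary operator: fold the left and right context
+                # sets pairwise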
+ left = left.obj + right = right.obj + + try: + result = operation(left, right) + except TypeError: + # Could be True or False. + return ContextSet(compiled.create(evaluator, True), compiled.create(evaluator, False)) + else: + return ContextSet(compiled.create(evaluator, result)) + elif operator == 'in': + return NO_CONTEXTS + + def check(obj): + """Checks if a Jedi object is either a float or an int.""" + return isinstance(obj, CompiledInstance) and \ + obj.name.string_name in ('int', 'float') + + # Static analysis, one is a number, the other one is not. + if operator in ('+', '-') and l_is_num != r_is_num \ + and not (check(left) or check(right)): + message = "TypeError: unsupported operand type(s) for +: %s and %s" + analysis.add(context, 'type-error-operation', operator, + message % (left, right)) + + return ContextSet(left, right) + + +def _remove_statements(evaluator, context, stmt, name): + """ + This is the part where statements are being stripped. + + Due to lazy evaluation, statements like a = func; b = a; b() have to be + evaluated. + """ + pep0484_contexts = \ + pep0484.find_type_from_comment_hint_assign(context, stmt, name) + if pep0484_contexts: + return pep0484_contexts + + return eval_expr_stmt(context, stmt, seek_name=name) + + +def tree_name_to_contexts(evaluator, context, tree_name): + types = [] + node = tree_name.get_definition(import_name_always=True) + if node is None: + node = tree_name.parent + if node.type == 'global_stmt': + context = evaluator.create_context(context, tree_name) + finder = NameFinder(evaluator, context, context, tree_name.value) + filters = finder.get_filters(search_global=True) + # For global_stmt lookups, we only need the first possible scope, + # which means the function itself. + filters = [next(filters)] + return finder.find(filters, attribute_lookup=False) + elif node.type not in ('import_from', 'import_name'): + raise ValueError("Should not happen.") + + typ = node.type + if typ == 'for_stmt': + types = pep0484.find_type_from_comment_hint_for(context, node, tree_name) + if types: + return types + if typ == 'with_stmt': + types = pep0484.find_type_from_comment_hint_with(context, node, tree_name) + if types: + return types + + if typ in ('for_stmt', 'comp_for'): + try: + types = context.predefined_names[node][tree_name.value] + except KeyError: + cn = ContextualizedNode(context, node.children[3]) + for_types = iterate_contexts(cn.infer(), cn) + c_node = ContextualizedName(context, tree_name) + types = check_tuple_assignments(evaluator, c_node, for_types) + elif typ == 'expr_stmt': + types = _remove_statements(evaluator, context, node, tree_name) + elif typ == 'with_stmt': + context_managers = context.eval_node(node.get_test_node_from_name(tree_name)) + enter_methods = context_managers.py__getattribute__('__enter__') + return enter_methods.execute_evaluated() + elif typ in ('import_from', 'import_name'): + types = imports.infer_import(context, tree_name) + elif typ in ('funcdef', 'classdef'): + types = _apply_decorators(context, node) + elif typ == 'try_stmt': + # TODO an exception can also be a tuple. Check for those. + # TODO check for types that are not classes and add it to + # the static analysis report. + exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling()) + types = exceptions.execute_evaluated() + else: + raise ValueError("Should not happen.") + return types + + +def _apply_decorators(context, node): + """ + Returns the function, that should to be executed in the end. 
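+            # unwrap to the raw Python objects so `operation` can run on them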
+    This is also the place where the decorators are processed.
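+
+    For instance, with two (hypothetical) decorators::
+
+        @deco_a
+        @deco_b
+        def func(): ...
+
+    the wrapping is applied bottom-up: deco_a(deco_b(func)).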
array[:3] + result = [] + for el in index.children: + if el == ':': + if not result: + result.append(None) + elif el.type == 'sliceop': + if len(el.children) == 2: + result.append(el.children[1]) + else: + result.append(el) + result += [None] * (3 - len(result)) + + return ContextSet(iterable.Slice(context, *result)) + + # No slices + return context.eval_node(index) diff --git a/pythonFiles/release/jedi/evaluate/sys_path.py b/pythonFiles/release/jedi/evaluate/sys_path.py index 7cfbd57b06ef..82e5e9df9ceb 100755 --- a/pythonFiles/release/jedi/evaluate/sys_path.py +++ b/pythonFiles/release/jedi/evaluate/sys_path.py @@ -1,34 +1,63 @@ import glob import os import sys - -from jedi._compatibility import exec_function, unicode -from jedi.parser import tree -from jedi.parser import Parser -from jedi.evaluate.cache import memoize_default +import imp +from jedi.evaluate.site import addsitedir + +from jedi._compatibility import unicode +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.base_context import ContextualizedNode +from jedi.evaluate.helpers import is_string +from jedi import settings from jedi import debug -from jedi import common -from jedi import cache +from jedi.evaluate.utils import ignored -def get_sys_path(): - def check_virtual_env(sys_path): - """ Add virtualenv's site-packages to the `sys.path`.""" - venv = os.getenv('VIRTUAL_ENV') - if not venv: - return - venv = os.path.abspath(venv) - p = _get_venv_sitepackages(venv) - if p not in sys_path: - sys_path.insert(0, p) +def get_venv_path(venv): + """Get sys.path for specified virtual environment.""" + sys_path = _get_venv_path_dirs(venv) + with ignored(ValueError): + sys_path.remove('') + sys_path = _get_sys_path_with_egglinks(sys_path) + # As of now, get_venv_path_dirs does not scan built-in pythonpath and + # user-local site-packages, let's approximate them using path from Jedi + # interpreter. + return sys_path + sys.path + - # Add all egg-links from the virtualenv. - for egg_link in glob.glob(os.path.join(p, '*.egg-link')): +def _get_sys_path_with_egglinks(sys_path): + """Find all paths including those referenced by egg-links. + + Egg-link-referenced directories are inserted into path immediately before + the directory on which their links were found. Such directories are not + taken into consideration by normal import mechanism, but they are traversed + when doing pkg_resources.require. + """ + result = [] + for p in sys_path: + # pkg_resources does not define a specific order for egg-link files + # using os.listdir to enumerate them, we're sorting them to have + # reproducible tests. + for egg_link in sorted(glob.glob(os.path.join(p, '*.egg-link'))): with open(egg_link) as fd: - sys_path.insert(0, fd.readline().rstrip()) + for line in fd: + line = line.strip() + if line: + result.append(os.path.join(p, line)) + # pkg_resources package only interprets the first + # non-empty line in egg-link files. 
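+        # for array[:3] the loop below collects [None, 3, None]
+        # as (start, stop, step)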
+ break + result.append(p) + return result - check_virtual_env(sys.path) - return [p for p in sys.path if p != ""] + +def _get_venv_path_dirs(venv): + """Get sys.path for venv without starting up the interpreter.""" + venv = os.path.abspath(venv) + sitedir = _get_venv_sitepackages(venv) + sys_path = [] + addsitedir(sys_path, sitedir) + return sys_path def _get_venv_sitepackages(venv): @@ -40,24 +69,21 @@ def _get_venv_sitepackages(venv): return p -def _execute_code(module_path, code): - c = "import os; from os.path import *; result=%s" - variables = {'__file__': module_path} - try: - exec_function(c % code, variables) - except Exception: - debug.warning('sys.path manipulation detected, but failed to evaluate.') - else: - try: - res = variables['result'] - if isinstance(res, str): - return [os.path.abspath(res)] - except KeyError: - pass - return [] +def _abs_path(module_context, path): + module_path = module_context.py__file__() + if os.path.isabs(path): + return path + + if module_path is None: + # In this case we have no idea where we actually are in the file + # system. + return None + + base_dir = os.path.dirname(module_path) + return os.path.abspath(os.path.join(base_dir, path)) -def _paths_from_assignment(evaluator, expr_stmt): +def _paths_from_assignment(module_context, expr_stmt): """ Extracts the assigned strings from an assignment that looks as follows:: @@ -71,15 +97,16 @@ def _paths_from_assignment(evaluator, expr_stmt): for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]): try: assert operator in ['=', '+='] - assert tree.is_node(assignee, 'power') and len(assignee.children) > 1 + assert assignee.type in ('power', 'atom_expr') and \ + len(assignee.children) > 1 c = assignee.children assert c[0].type == 'name' and c[0].value == 'sys' trailer = c[1] assert trailer.children[0] == '.' and trailer.children[1].value == 'path' # TODO Essentially we're not checking details on sys.path # manipulation. Both assigment of the sys.path and changing/adding - # parts of the sys.path are the same: They get added to the current - # sys.path. + # parts of the sys.path are the same: They get added to the end of + # the current sys.path. """ execution = c[2] assert execution.children[0] == '[' @@ -90,101 +117,105 @@ def _paths_from_assignment(evaluator, expr_stmt): except AssertionError: continue - from jedi.evaluate.iterable import get_iterator_types - from jedi.evaluate.precedence import is_string - for val in get_iterator_types(evaluator.eval_statement(expr_stmt)): - if is_string(val): - yield val.obj + cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt) + for lazy_context in cn.infer().iterate(cn): + for context in lazy_context.infer(): + if is_string(context): + abs_path = _abs_path(module_context, context.obj) + if abs_path is not None: + yield abs_path -def _paths_from_list_modifications(module_path, trailer1, trailer2): +def _paths_from_list_modifications(module_context, trailer1, trailer2): """ extract the path from either "sys.path.append" or "sys.path.insert" """ # Guarantee that both are trailers, the first one a name and the second one # a function execution with at least one param. - if not (tree.is_node(trailer1, 'trailer') and trailer1.children[0] == '.' - and tree.is_node(trailer2, 'trailer') and trailer2.children[0] == '(' + if not (trailer1.type == 'trailer' and trailer1.children[0] == '.' 
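+                        # (that first non-empty line is the linked project's
+                        # source directory, resolved relative to the
+                        # site-packages entry)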
+ and trailer2.type == 'trailer' and trailer2.children[0] == '(' and len(trailer2.children) == 3): - return [] + return name = trailer1.children[1].value if name not in ['insert', 'append']: - return [] - + return arg = trailer2.children[1] if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma. arg = arg.children[2] - return _execute_code(module_path, arg.get_code()) + + for context in module_context.create_context(arg).eval_node(arg): + if is_string(context): + abs_path = _abs_path(module_context, context.obj) + if abs_path is not None: + yield abs_path -def _check_module(evaluator, module): +@evaluator_method_cache(default=[]) +def check_sys_path_modifications(module_context): + """ + Detect sys.path modifications within module. + """ def get_sys_path_powers(names): for name in names: power = name.parent.parent - if tree.is_node(power, 'power'): + if power.type in ('power', 'atom_expr'): c = power.children - if isinstance(c[0], tree.Name) and c[0].value == 'sys' \ - and tree.is_node(c[1], 'trailer'): + if c[0].type == 'name' and c[0].value == 'sys' \ + and c[1].type == 'trailer': n = c[1].children[1] - if isinstance(n, tree.Name) and n.value == 'path': + if n.type == 'name' and n.value == 'path': yield name, power - sys_path = list(get_sys_path()) # copy + if module_context.tree_node is None: + return [] + + added = [] try: - possible_names = module.used_names['path'] + possible_names = module_context.tree_node.get_used_names()['path'] except KeyError: pass else: for name, power in get_sys_path_powers(possible_names): - stmt = name.get_definition() + expr_stmt = power.parent if len(power.children) >= 4: - sys_path.extend(_paths_from_list_modifications(module.path, *power.children[2:4])) - elif name.get_definition().type == 'expr_stmt': - sys_path.extend(_paths_from_assignment(evaluator, stmt)) - return sys_path + added.extend( + _paths_from_list_modifications( + module_context, *power.children[2:4] + ) + ) + elif expr_stmt is not None and expr_stmt.type == 'expr_stmt': + added.extend(_paths_from_assignment(module_context, expr_stmt)) + return added -@memoize_default(evaluator_is_first_arg=True, default=[]) -def sys_path_with_modifications(evaluator, module): - if module.path is None: - # Support for modules without a path is bad, therefore return the - # normal path. 
- return list(get_sys_path()) +def sys_path_with_modifications(evaluator, module_context): + return evaluator.project.sys_path + check_sys_path_modifications(module_context) - curdir = os.path.abspath(os.curdir) - with common.ignored(OSError): - os.chdir(os.path.dirname(module.path)) +def detect_additional_paths(evaluator, script_path): + django_paths = _detect_django_path(script_path) buildout_script_paths = set() - result = _check_module(evaluator, module) - result += _detect_django_path(module.path) - for buildout_script in _get_buildout_scripts(module.path): - for path in _get_paths_from_buildout_script(evaluator, buildout_script): + for buildout_script_path in _get_buildout_script_paths(script_path): + for path in _get_paths_from_buildout_script(evaluator, buildout_script_path): buildout_script_paths.add(path) - # cleanup, back to old directory - os.chdir(curdir) - return list(result) + list(buildout_script_paths) + return django_paths + list(buildout_script_paths) -def _get_paths_from_buildout_script(evaluator, buildout_script): - def load(buildout_script): - try: - with open(buildout_script, 'rb') as f: - source = common.source_to_unicode(f.read()) - except IOError: - debug.dbg('Error trying to read buildout_script: %s', buildout_script) - return - - p = Parser(evaluator.grammar, source, buildout_script) - cache.save_parser(buildout_script, p) - return p.module - cached = cache.load_parser(buildout_script) - module = cached and cached.module or load(buildout_script) - if not module: +def _get_paths_from_buildout_script(evaluator, buildout_script_path): + try: + module_node = evaluator.grammar.parse( + path=buildout_script_path, + cache=True, + cache_path=settings.cache_directory + ) + except IOError: + debug.warning('Error trying to read buildout_script: %s', buildout_script_path) return - for path in _check_module(evaluator, module): + from jedi.evaluate.context import ModuleContext + module = ModuleContext(evaluator, module_node, buildout_script_path) + for path in check_sys_path_modifications(module): yield path @@ -209,14 +240,14 @@ def _detect_django_path(module_path): result = [] for parent in traverse_parents(module_path): - with common.ignored(IOError): + with ignored(IOError): with open(parent + os.path.sep + 'manage.py'): debug.dbg('Found django path: %s', module_path) result.append(parent) return result -def _get_buildout_scripts(module_path): +def _get_buildout_script_paths(module_path): """ if there is a 'buildout.cfg' file in one of the parent directories of the given module it will return a list of all files in the buildout bin @@ -239,9 +270,39 @@ def _get_buildout_scripts(module_path): firstline = f.readline() if firstline.startswith('#!') and 'python' in firstline: extra_module_paths.append(filepath) - except IOError as e: - # either permission error or race cond. because file got deleted + except (UnicodeDecodeError, IOError) as e: + # Probably a binary file; permission error or race cond. because file got deleted # ignore debug.warning(unicode(e)) continue return extra_module_paths + + +def dotted_path_in_sys_path(sys_path, module_path): + """ + Returns the dotted path inside a sys.path. + """ + # First remove the suffix. + for suffix, _, _ in imp.get_suffixes(): + if module_path.endswith(suffix): + module_path = module_path[:-len(suffix)] + break + else: + # There should always be a suffix in a valid Python file on the path. + return None + + if module_path.startswith(os.path.sep): + # The paths in sys.path most of the times don't end with a slash. 
+ module_path = module_path[1:] + + for p in sys_path: + if module_path.startswith(p): + rest = module_path[len(p):] + if rest: + split = rest.split(os.path.sep) + for string in split: + if not string or '.' in string: + return None + return '.'.join(split) + + return None diff --git a/pythonFiles/release/jedi/evaluate/usages.py b/pythonFiles/release/jedi/evaluate/usages.py new file mode 100644 index 000000000000..290c4695b169 --- /dev/null +++ b/pythonFiles/release/jedi/evaluate/usages.py @@ -0,0 +1,62 @@ +from jedi.evaluate import imports +from jedi.evaluate.filters import TreeNameDefinition +from jedi.evaluate.context import ModuleContext + + +def _resolve_names(definition_names, avoid_names=()): + for name in definition_names: + if name in avoid_names: + # Avoiding recursions here, because goto on a module name lands + # on the same module. + continue + + if not isinstance(name, imports.SubModuleName): + # SubModuleNames are not actually existing names but created + # names when importing something like `import foo.bar.baz`. + yield name + + if name.api_type == 'module': + for name in _resolve_names(name.goto(), definition_names): + yield name + + +def _dictionarize(names): + return dict( + (n if n.tree_name is None else n.tree_name, n) + for n in names + ) + + +def _find_names(module_context, tree_name): + context = module_context.create_context(tree_name) + name = TreeNameDefinition(context, tree_name) + found_names = set(name.goto()) + found_names.add(name) + return _dictionarize(_resolve_names(found_names)) + + +def usages(module_context, tree_name): + search_name = tree_name.value + found_names = _find_names(module_context, tree_name) + modules = set(d.get_root_context() for d in found_names.values()) + modules = set(m for m in modules if isinstance(m, ModuleContext)) + + non_matching_usage_maps = {} + for m in imports.get_modules_containing_name(module_context.evaluator, modules, search_name): + for name_leaf in m.tree_node.get_used_names().get(search_name, []): + new = _find_names(m, name_leaf) + if any(tree_name in found_names for tree_name in new): + found_names.update(new) + for tree_name in new: + for dct in non_matching_usage_maps.get(tree_name, []): + # A usage that was previously searched for matches with + # a now found name. Merge. 
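+
+    # each sys.path entry is tried as a prefix; a leftover such as 'foo/bar'
+    # (no empty or dotted components) maps to the dotted name 'foo.bar'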
+ found_names.update(dct) + try: + del non_matching_usage_maps[tree_name] + except KeyError: + pass + else: + for name in new: + non_matching_usage_maps.setdefault(name, []).append(new) + return found_names.values() diff --git a/pythonFiles/release/jedi/common.py b/pythonFiles/release/jedi/evaluate/utils.py old mode 100755 new mode 100644 similarity index 62% rename from pythonFiles/release/jedi/common.py rename to pythonFiles/release/jedi/evaluate/utils.py index 0e9afd11fb5a..7fc1c246de0d --- a/pythonFiles/release/jedi/common.py +++ b/pythonFiles/release/jedi/evaluate/utils.py @@ -2,11 +2,19 @@ import sys import contextlib import functools -import re -from ast import literal_eval -from jedi._compatibility import unicode, reraise -from jedi import settings +from jedi._compatibility import reraise + + +def to_list(func): + def wrapper(*args, **kwargs): + return list(func(*args, **kwargs)) + return wrapper + + +def unite(iterable): + """Turns a two dimensional array into a one dimensional.""" + return set(typ for types in iterable for typ in types) class UncaughtAttributeError(Exception): @@ -81,16 +89,15 @@ def __next__(self): @contextlib.contextmanager -def scale_speed_settings(factor): - a = settings.max_executions - b = settings.max_until_execution_unique - settings.max_executions *= factor - settings.max_until_execution_unique *= factor +def ignored(*exceptions): + """ + Context manager that ignores all of the specified exceptions. This will + be in the standard library starting with Python 3.4. + """ try: yield - finally: - settings.max_executions = a - settings.max_until_execution_unique = b + except exceptions: + pass def indent_block(text, indention=' '): @@ -101,54 +108,3 @@ def indent_block(text, indention=' '): text = text[:-1] lines = text.split('\n') return '\n'.join(map(lambda s: indention + s, lines)) + temp - - -@contextlib.contextmanager -def ignored(*exceptions): - """ - Context manager that ignores all of the specified exceptions. This will - be in the standard library starting with Python 3.4. - """ - try: - yield - except exceptions: - pass - - -def source_to_unicode(source, encoding=None): - def detect_encoding(): - """ - For the implementation of encoding definitions in Python, look at: - - http://www.python.org/dev/peps/pep-0263/ - - http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations - """ - byte_mark = literal_eval(r"b'\xef\xbb\xbf'") - if source.startswith(byte_mark): - # UTF-8 byte-order mark - return 'utf-8' - - first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0) - possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)", - first_two_lines) - if possible_encoding: - return possible_encoding.group(1) - else: - # the default if nothing else has been set -> PEP 263 - return encoding if encoding is not None else 'iso-8859-1' - - if isinstance(source, unicode): - # only cast str/bytes - return source - - # cast to unicode by default - return unicode(source, detect_encoding(), 'replace') - - -def splitlines(string): - """ - A splitlines for Python code. In contrast to Python's ``str.splitlines``, - looks at form feeds and other special characters as normal text. Just - splits ``\n`` and ``\r\n``. - Also different: Returns ``['']`` for an empty string input. 
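+                    # and forget the pending entry so it is not merged twice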
- """ - return re.split('\n|\r\n', string) diff --git a/pythonFiles/release/jedi/parser_utils.py b/pythonFiles/release/jedi/parser_utils.py new file mode 100644 index 000000000000..59c6408ea1c6 --- /dev/null +++ b/pythonFiles/release/jedi/parser_utils.py @@ -0,0 +1,241 @@ +import textwrap +from inspect import cleandoc + +from jedi._compatibility import literal_eval, is_py3 +from parso.python import tree + +_EXECUTE_NODES = set([ + 'funcdef', 'classdef', 'import_from', 'import_name', 'test', 'or_test', + 'and_test', 'not_test', 'comparison', 'expr', 'xor_expr', 'and_expr', + 'shift_expr', 'arith_expr', 'atom_expr', 'term', 'factor', 'power', 'atom' +]) + +_FLOW_KEYWORDS = ( + 'try', 'except', 'finally', 'else', 'if', 'elif', 'with', 'for', 'while' +) + + +def get_executable_nodes(node, last_added=False): + """ + For static analysis. + """ + result = [] + typ = node.type + if typ == 'name': + next_leaf = node.get_next_leaf() + if last_added is False and node.parent.type != 'param' and next_leaf != '=': + result.append(node) + elif typ == 'expr_stmt': + # I think evaluating the statement (and possibly returned arrays), + # should be enough for static analysis. + result.append(node) + for child in node.children: + result += get_executable_nodes(child, last_added=True) + elif typ == 'decorator': + # decorator + if node.children[-2] == ')': + node = node.children[-3] + if node != '(': + result += get_executable_nodes(node) + else: + try: + children = node.children + except AttributeError: + pass + else: + if node.type in _EXECUTE_NODES and not last_added: + result.append(node) + + for child in children: + result += get_executable_nodes(child, last_added) + + return result + + +def get_comp_fors(comp_for): + yield comp_for + last = comp_for.children[-1] + while True: + if last.type == 'comp_for': + yield last + elif not last.type == 'comp_if': + break + last = last.children[-1] + + +def for_stmt_defines_one_name(for_stmt): + """ + Returns True if only one name is returned: ``for x in y``. + Returns False if the for loop is more complicated: ``for x, z in y``. + + :returns: bool + """ + return for_stmt.children[1].type == 'name' + + +def get_flow_branch_keyword(flow_node, node): + start_pos = node.start_pos + if not (flow_node.start_pos < start_pos <= flow_node.end_pos): + raise ValueError('The node is not part of the flow.') + + keyword = None + for i, child in enumerate(flow_node.children): + if start_pos < child.start_pos: + return keyword + first_leaf = child.get_first_leaf() + if first_leaf in _FLOW_KEYWORDS: + keyword = first_leaf + return 0 + +def get_statement_of_position(node, pos): + for c in node.children: + if c.start_pos <= pos <= c.end_pos: + if c.type not in ('decorated', 'simple_stmt', 'suite') \ + and not isinstance(c, (tree.Flow, tree.ClassOrFunc)): + return c + else: + try: + return get_statement_of_position(c, pos) + except AttributeError: + pass # Must be a non-scope + return None + + +def clean_scope_docstring(scope_node): + """ Returns a cleaned version of the docstring token. """ + node = scope_node.get_doc_node() + if node is not None: + # TODO We have to check next leaves until there are no new + # leaves anymore that might be part of the docstring. A + # docstring can also look like this: ``'foo' 'bar' + # Returns a literal cleaned version of the ``Token``. + cleaned = cleandoc(safe_literal_eval(node.value)) + # Since we want the docstr output to be always unicode, just + # force it. 
+ if is_py3 or isinstance(cleaned, unicode): + return cleaned + else: + return unicode(cleaned, 'UTF-8', 'replace') + return '' + + +def safe_literal_eval(value): + first_two = value[:2].lower() + if first_two[0] == 'f' or first_two in ('fr', 'rf'): + # literal_eval is not able to resovle f literals. We have to do that + # manually, but that's right now not implemented. + return '' + + try: + return literal_eval(value) + except SyntaxError: + # It's possible to create syntax errors with literals like rb'' in + # Python 2. This should not be possible and in that case just return an + # empty string. + # Before Python 3.3 there was a more strict definition in which order + # you could define literals. + return '' + + +def get_call_signature(funcdef, width=72, call_string=None): + """ + Generate call signature of this function. + + :param width: Fold lines if a line is longer than this value. + :type width: int + :arg func_name: Override function name when given. + :type func_name: str + + :rtype: str + """ + # Lambdas have no name. + if call_string is None: + if funcdef.type == 'lambdef': + call_string = '' + else: + call_string = funcdef.name.value + if funcdef.type == 'lambdef': + p = '(' + ''.join(param.get_code() for param in funcdef.get_params()).strip() + ')' + else: + p = funcdef.children[2].get_code() + code = call_string + p + + return '\n'.join(textwrap.wrap(code, width)) + + +def get_doc_with_call_signature(scope_node): + """ + Return a document string including call signature. + """ + call_signature = None + if scope_node.type == 'classdef': + for funcdef in scope_node.iter_funcdefs(): + if funcdef.name.value == '__init__': + call_signature = \ + get_call_signature(funcdef, call_string=scope_node.name.value) + elif scope_node.type in ('funcdef', 'lambdef'): + call_signature = get_call_signature(scope_node) + + doc = clean_scope_docstring(scope_node) + if call_signature is None: + return doc + return '%s\n\n%s' % (call_signature, doc) + + +def move(node, line_offset): + """ + Move the `Node` start_pos. + """ + try: + children = node.children + except AttributeError: + node.line += line_offset + else: + for c in children: + move(c, line_offset) + + +def get_following_comment_same_line(node): + """ + returns (as string) any comment that appears on the same line, + after the node, including the # + """ + try: + if node.type == 'for_stmt': + whitespace = node.children[5].get_first_leaf().prefix + elif node.type == 'with_stmt': + whitespace = node.children[3].get_first_leaf().prefix + else: + whitespace = node.get_last_leaf().get_next_leaf().prefix + except AttributeError: + return None + except ValueError: + # TODO in some particular cases, the tree doesn't seem to be linked + # correctly + return None + if "#" not in whitespace: + return None + comment = whitespace[whitespace.index("#"):] + if "\r" in comment: + comment = comment[:comment.index("\r")] + if "\n" in comment: + comment = comment[:comment.index("\n")] + return comment + + +def is_scope(node): + return node.type in ('file_input', 'classdef', 'funcdef', 'lambdef', 'comp_for') + + +def get_parent_scope(node, include_flows=False): + """ + Returns the underlying scope. 
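+            # already text (always the case on Python 3)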
+ """ + scope = node.parent + while scope is not None: + if include_flows and isinstance(scope, tree.Flow): + return scope + if is_scope(scope): + break + scope = scope.parent + return scope diff --git a/pythonFiles/release/jedi/refactoring.py b/pythonFiles/release/jedi/refactoring.py index a342f08e2b8f..ee938427fcee 100755 --- a/pythonFiles/release/jedi/refactoring.py +++ b/pythonFiles/release/jedi/refactoring.py @@ -14,9 +14,8 @@ """ import difflib -from jedi import common +from parso import python_bytes_to_unicode, split_lines from jedi.evaluate import helpers -from jedi.parser import tree as pt class Refactoring(object): @@ -29,7 +28,7 @@ def __init__(self, change_dct): def old_files(self): dct = {} for old_path, (new_path, old_l, new_l) in self.change_dct.items(): - dct[new_path] = '\n'.join(new_l) + dct[old_path] = '\n'.join(old_l) return dct def new_files(self): @@ -83,7 +82,7 @@ def process(path, old_lines, new_lines): with open(current_path) as f: source = f.read() - new_lines = common.splitlines(common.source_to_unicode(source)) + new_lines = split_lines(python_bytes_to_unicode(source)) old_lines = new_lines[:] nr, indent = name.line, name.column @@ -101,7 +100,7 @@ def extract(script, new_name): :type source: str :return: list of changed lines/changed files """ - new_lines = common.splitlines(common.source_to_unicode(script.source)) + new_lines = split_lines(python_bytes_to_unicode(script.source)) old_lines = new_lines[:] user_stmt = script._parser.user_stmt() @@ -160,43 +159,42 @@ def inline(script): """ :type script: api.Script """ - new_lines = common.splitlines(common.source_to_unicode(script.source)) + new_lines = split_lines(python_bytes_to_unicode(script.source)) dct = {} definitions = script.goto_assignments() - with common.ignored(AssertionError): - assert len(definitions) == 1 - stmt = definitions[0]._definition - usages = script.usages() - inlines = [r for r in usages - if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos] - inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column), - reverse=True) - expression_list = stmt.expression_list() - # don't allow multiline refactorings for now. - assert stmt.start_pos[0] == stmt.end_pos[0] - index = stmt.start_pos[0] - 1 - - line = new_lines[index] - replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1] - replace_str = replace_str.strip() - # tuples need parentheses - if expression_list and isinstance(expression_list[0], pr.Array): - arr = expression_list[0] - if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1: - replace_str = '(%s)' % replace_str - - # if it's the only assignment, remove the statement - if len(stmt.get_defined_names()) == 1: - line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:] - - dct = _rename(inlines, replace_str) - # remove the empty line - new_lines = dct[script.path][2] - if line.strip(): - new_lines[index] = line - else: - new_lines.pop(index) + assert len(definitions) == 1 + stmt = definitions[0]._definition + usages = script.usages() + inlines = [r for r in usages + if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos] + inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column), + reverse=True) + expression_list = stmt.expression_list() + # don't allow multiline refactorings for now. 
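+    # climb the parent chain until is_scope() (defined above) matches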
+ assert stmt.start_pos[0] == stmt.end_pos[0] + index = stmt.start_pos[0] - 1 + + line = new_lines[index] + replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1] + replace_str = replace_str.strip() + # tuples need parentheses + if expression_list and isinstance(expression_list[0], pr.Array): + arr = expression_list[0] + if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1: + replace_str = '(%s)' % replace_str + + # if it's the only assignment, remove the statement + if len(stmt.get_defined_names()) == 1: + line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:] + + dct = _rename(inlines, replace_str) + # remove the empty line + new_lines = dct[script.path][2] + if line.strip(): + new_lines[index] = line + else: + new_lines.pop(index) return Refactoring(dct) diff --git a/pythonFiles/release/jedi/settings.py b/pythonFiles/release/jedi/settings.py index fb0b38f55f81..f1ae6dbb77e7 100755 --- a/pythonFiles/release/jedi/settings.py +++ b/pythonFiles/release/jedi/settings.py @@ -16,7 +16,6 @@ ~~~~~~~~~~~~~~~~~ .. autodata:: case_insensitive_completion -.. autodata:: add_dot_after_module .. autodata:: add_bracket_after_function .. autodata:: no_completion_duplicates @@ -44,32 +43,9 @@ .. autodata:: auto_import_modules -.. _settings-recursion: - -Recursions -~~~~~~~~~~ - -Recursion settings are important if you don't want extremly -recursive python code to go absolutely crazy. First of there is a -global limit :data:`max_executions`. This limit is important, to set -a maximum amount of time, the completion may use. - -The default values are based on experiments while completing the |jedi| library -itself (inception!). But I don't think there's any other Python library that -uses recursion in a similarly extreme way. These settings make the completion -definitely worse in some cases. But a completion should also be fast. - -.. autodata:: max_until_execution_unique -.. autodata:: max_function_recursion_level -.. autodata:: max_executions_without_builtins -.. autodata:: max_executions -.. autodata:: scale_call_signatures - - Caching ~~~~~~~ -.. autodata:: star_import_cache_validity .. autodata:: call_signatures_validity @@ -86,13 +62,6 @@ The completion is by default case insensitive. """ -add_dot_after_module = False -""" -Adds a dot after a module, because a module that is not accessed this way is -definitely not the normal case. However, in VIM this doesn't work, that's why -it isn't used at the moment. -""" - add_bracket_after_function = False """ Adds an opening bracket after a function, because that's normal behaviour. @@ -124,7 +93,7 @@ 'jedi') cache_directory = os.path.expanduser(_cache_directory) """ -The path where all the caches can be found. +The path where the cache is stored. On Linux, this defaults to ``~/.cache/jedi/``, on OS X to ``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``. @@ -183,55 +152,10 @@ ``globals()`` modifications a lot. """ -# ---------------- -# recursions -# ---------------- - -max_until_execution_unique = 50 -""" -This limit is probably the most important one, because if this limit is -exceeded, functions can only be one time executed. So new functions will be -executed, complex recursions with the same functions again and again, are -ignored. -""" - -max_function_recursion_level = 5 -""" -`max_function_recursion_level` is more about whether the recursions are -stopped in deepth or in width. The ratio beetween this and -`max_until_execution_unique` is important here. 
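+    # start_pos is a 1-based (line, column) tuple; lists are 0-based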
It stops a recursion (after -the number of function calls in the recursion), if it was already used -earlier. -""" - -max_executions_without_builtins = 200 -""" -.. todo:: Document this. -""" - -max_executions = 250 -""" -A maximum amount of time, the completion may use. -""" - -scale_call_signatures = 0.1 -""" -Because call_signatures is normally used on every single key hit, it has -to be faster than a normal completion. This is the factor that is used to -scale `max_executions` and `max_until_execution_unique`: -""" - # ---------------- # caching validity (time) # ---------------- -star_import_cache_validity = 60.0 -""" -In huge packages like numpy, checking all star imports on every completion -might be slow, therefore we do a star import caching, that lasts a certain -time span (in seconds). -""" - call_signatures_validity = 3.0 """ Finding function calls might be slow (0.1-0.5s). This is not acceptible for diff --git a/pythonFiles/release/jedi/utils.py b/pythonFiles/release/jedi/utils.py index 6b48ef496a5e..177524c50168 100755 --- a/pythonFiles/release/jedi/utils.py +++ b/pythonFiles/release/jedi/utils.py @@ -5,13 +5,19 @@ from __future__ import absolute_import import __main__ from collections import namedtuple +import logging +import traceback import re import os import sys +from parso import split_lines + from jedi import Interpreter -from jedi.api.helpers import completion_parts -from jedi.parser.user_context import UserContext +from jedi.api.helpers import get_on_completion_name + + +READLINE_DEBUG = False def setup_readline(namespace_module=__main__): @@ -55,6 +61,13 @@ def setup_readline(namespace_module=__main__): bash). """ + if READLINE_DEBUG: + logging.basicConfig( + filename='/tmp/jedi.log', + filemode='a', + level=logging.DEBUG + ) + class JediRL(object): def complete(self, text, state): """ @@ -70,12 +83,21 @@ def complete(self, text, state): sys.path.insert(0, os.getcwd()) # Calling python doesn't have a path, so add to sys.path. 
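
A usage note on the slimmed-down settings module (a sketch, not part of the patch): the remaining options are plain module attributes, so clients tune them by simple assignment before asking for completions. Only settings that appear in the diff above are used; the values are illustrative.

import jedi.settings

jedi.settings.case_insensitive_completion = False  # require exact-case matches
jedi.settings.add_bracket_after_function = True    # complete 'foo(' rather than 'foo'
jedi.settings.call_signatures_validity = 1.0       # cache call signatures for 1 second
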
try: + logging.debug("Start REPL completion: " + repr(text)) interpreter = Interpreter(text, [namespace_module.__dict__]) - path = UserContext(text, (1, len(text))).get_path_until_cursor() - path, dot, like = completion_parts(path) - before = text[:len(text) - len(like)] + lines = split_lines(text) + position = (len(lines), len(lines[-1])) + name = get_on_completion_name( + interpreter._get_module_node(), + lines, + position + ) + before = text[:len(text) - len(name)] completions = interpreter.completions() + except: + logging.error("REPL Completion error:\n" + traceback.format_exc()) + raise finally: sys.path.pop(0) @@ -88,7 +110,7 @@ def complete(self, text, state): try: import readline except ImportError: - print("Module readline not available.") + print("Jedi: Module readline not available.") else: readline.set_completer(JediRL().complete) readline.parse_and_bind("tab: complete") diff --git a/src/test/.vscode/settings.json b/src/test/.vscode/settings.json index e550760b5f05..a6a0df6d1f21 100644 --- a/src/test/.vscode/settings.json +++ b/src/test/.vscode/settings.json @@ -1,5 +1,5 @@ { - "python.linting.pylintEnabled": true, + "python.linting.pylintEnabled": false, "python.linting.flake8Enabled": false, "python.workspaceSymbols.enabled": false, "python.unitTest.nosetestArgs": [], @@ -21,5 +21,7 @@ "python.linting.pydocstyleEnabled": false, "python.linting.pylamaEnabled": false, "python.linting.mypyEnabled": false, - "python.formatting.provider": "yapf" + "python.formatting.provider": "yapf", + "python.pythonPath": "python", + "python.linting.pylintUseMinimalCheckers": false } \ No newline at end of file diff --git a/src/test/refactor/extension.refactor.extract.method.test.ts b/src/test/refactor/extension.refactor.extract.method.test.ts index ee3c2b34a9fc..69c7262d1206 100644 --- a/src/test/refactor/extension.refactor.extract.method.test.ts +++ b/src/test/refactor/extension.refactor.extract.method.test.ts @@ -10,7 +10,7 @@ import { getTextEditsFromPatch } from '../../client/common/editor'; import { extractMethod } from '../../client/providers/simpleRefactorProvider'; import { RefactorProxy } from '../../client/refactor/proxy'; import { UnitTestIocContainer } from '../unittests/serviceRegistry'; -import { closeActiveWindows, initialize, initializeTest, IS_TRAVIS, wait } from './../initialize'; +import { closeActiveWindows, initialize, initializeTest, wait } from './../initialize'; import { MockOutputChannel } from './../mockClasses'; const EXTENSION_DIR = path.join(__dirname, '..', '..', '..'); @@ -57,47 +57,33 @@ suite('Method Extraction', () => { ioc.registerVariableTypes(); } - function testingMethodExtraction(shouldError: boolean, startPos: Position, endPos: Position) { + async function testingMethodExtraction(shouldError: boolean, startPos: Position, endPos: Position): Promise { const pythonSettings = PythonSettings.getInstance(vscode.Uri.file(refactorTargetFile)); const rangeOfTextToExtract = new vscode.Range(startPos, endPos); const proxy = new RefactorProxy(EXTENSION_DIR, pythonSettings, path.dirname(refactorTargetFile), ioc.serviceContainer); - let expectedTextEdits: vscode.TextEdit[]; - let ignoreErrorHandling = false; - let mockTextDoc: vscode.TextDocument; + // tslint:disable-next-line:no-multiline-string const DIFF = `--- a/refactor.py\n+++ b/refactor.py\n@@ -237,9 +237,12 @@\n try:\n self._process_request(self._input.readline())\n except Exception as ex:\n- message = ex.message + ' \\n' + traceback.format_exc()\n- sys.stderr.write(str(len(message)) + ':' + message)\n- 
sys.stderr.flush()\n+ self.myNewMethod(ex)\n+\n+ def myNewMethod(self, ex):\n+ message = ex.message + ' \\n' + traceback.format_exc()\n+ sys.stderr.write(str(len(message)) + ':' + message)\n+ sys.stderr.flush()\n \n if __name__ == '__main__':\n RopeRefactoring().watch()\n`; - return new Promise((resolve, reject) => { - vscode.workspace.openTextDocument(refactorTargetFile).then(textDocument => { - mockTextDoc = textDocument; - expectedTextEdits = getTextEditsFromPatch(textDocument.getText(), DIFF); - resolve(); - }, reject); - }) - .then(() => proxy.extractMethod(mockTextDoc, 'myNewMethod', refactorTargetFile, rangeOfTextToExtract, options)) - .then(response => { - if (shouldError) { - ignoreErrorHandling = true; - assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); - } - const textEdits = getTextEditsFromPatch(mockTextDoc.getText(), DIFF); - assert.equal(response.results.length, 1, 'Invalid number of items in response'); - assert.equal(textEdits.length, expectedTextEdits.length, 'Invalid number of Text Edits'); - textEdits.forEach(edit => { - const foundEdit = expectedTextEdits.filter(item => item.newText === edit.newText && item.range.isEqual(edit.range)); - assert.equal(foundEdit.length, 1, 'Edit not found'); - }); - }).catch((error: any) => { - if (ignoreErrorHandling) { - return Promise.reject(error!); - } - if (shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - return; - } - - return Promise.reject(error!); + const mockTextDoc = await vscode.workspace.openTextDocument(refactorTargetFile); + const expectedTextEdits = getTextEditsFromPatch(mockTextDoc.getText(), DIFF); + try { + const response = await proxy.extractMethod(mockTextDoc, 'myNewMethod', refactorTargetFile, rangeOfTextToExtract, options); + if (shouldError) { + assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); + } + const textEdits = getTextEditsFromPatch(mockTextDoc.getText(), DIFF); + assert.equal(response.results.length, 1, 'Invalid number of items in response'); + assert.equal(textEdits.length, expectedTextEdits.length, 'Invalid number of Text Edits'); + textEdits.forEach(edit => { + const foundEdit = expectedTextEdits.filter(item => item.newText === edit.newText && item.range.isEqual(edit.range)); + assert.equal(foundEdit.length, 1, 'Edit not found'); }); + } catch (error) { + if (!shouldError) { + // Wait a minute this shouldn't work, what's going on + assert.equal('Error', 'No error', `${error}`); + } + } } test('Extract Method', async () => { @@ -112,68 +98,39 @@ suite('Method Extraction', () => { await testingMethodExtraction(true, startPos, endPos); }); - function testingMethodExtractionEndToEnd(shouldError: boolean, startPos: Position, endPos: Position) { + async function testingMethodExtractionEndToEnd(shouldError: boolean, startPos: Position, endPos: Position): Promise { const ch = new MockOutputChannel('Python'); - let textDocument: vscode.TextDocument; - let textEditor: vscode.TextEditor; const rangeOfTextToExtract = new vscode.Range(startPos, endPos); - let ignoreErrorHandling = false; - - return vscode.workspace.openTextDocument(refactorTargetFile).then(document => { - textDocument = document; - return vscode.window.showTextDocument(textDocument); - }).then(editor => { - assert(vscode.window.activeTextEditor, 'No active editor'); - editor.selections = [new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end)]; - editor.selection = new 
vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end); - textEditor = editor; - return; - }).then(() => { - return extractMethod(EXTENSION_DIR, textEditor, rangeOfTextToExtract, ch, ioc.serviceContainer).then(() => { - if (shouldError) { - ignoreErrorHandling = true; - assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); - } - return textEditor.document.save(); - }).then(() => { - assert.equal(ch.output.length, 0, 'Output channel is not empty'); - assert.equal(textDocument.lineAt(241).text.trim().indexOf('def newmethod'), 0, 'New Method not created'); - assert.equal(textDocument.lineAt(239).text.trim().startsWith('self.newmethod'), true, 'New Method not being used'); - }).catch((error: any) => { - if (ignoreErrorHandling) { - return Promise.reject(error!); - } - if (shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - return; - } - - return Promise.reject(error!); - }); - }, error => { - if (ignoreErrorHandling) { - return Promise.reject(error); - } + + const textDocument = await vscode.workspace.openTextDocument(refactorTargetFile); + const editor = await vscode.window.showTextDocument(textDocument); + + editor.selections = [new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end)]; + editor.selection = new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end); + + try { + await extractMethod(EXTENSION_DIR, editor, rangeOfTextToExtract, ch, ioc.serviceContainer); if (shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - } else { - // tslint:disable-next-line:prefer-template restrict-plus-operands - assert.fail(error, null, 'Method extraction failed\n' + ch.output, ''); - return Promise.reject(error); + assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); } - }); + + const newMethodRefLine = textDocument.lineAt(editor.selection.start); + assert.equal(ch.output.length, 0, 'Output channel is not empty'); + assert.equal(textDocument.lineAt(newMethodRefLine.lineNumber + 2).text.trim().indexOf('def newmethod'), 0, 'New Method not created'); + assert.equal(newMethodRefLine.text.trim().startsWith('self.newmethod'), true, 'New Method not being used'); + } catch (error) { + if (!shouldError) { + assert.equal('Error', 'No error', `${error}`); + } + } } // This test fails on linux (text document not getting updated in time) - if (!IS_TRAVIS) { - test('Extract Method (end to end)', async () => { - const startPos = new vscode.Position(239, 0); - const endPos = new vscode.Position(241, 35); - await testingMethodExtractionEndToEnd(false, startPos, endPos); - }); - } + test('Extract Method (end to end)', async () => { + const startPos = new vscode.Position(239, 0); + const endPos = new vscode.Position(241, 35); + await testingMethodExtractionEndToEnd(false, startPos, endPos); + }); test('Extract Method will fail if complete statements are not selected', async () => { const startPos = new vscode.Position(239, 30); diff --git a/src/test/refactor/extension.refactor.extract.var.test.ts b/src/test/refactor/extension.refactor.extract.var.test.ts index d12283a74198..2fd0b3161930 100644 --- a/src/test/refactor/extension.refactor.extract.var.test.ts +++ b/src/test/refactor/extension.refactor.extract.var.test.ts @@ -56,46 +56,31 @@ suite('Variable Extraction', () => { ioc.registerVariableTypes(); } - function testingVariableExtraction(shouldError: boolean, startPos: 
Position, endPos: Position) { + async function testingVariableExtraction(shouldError: boolean, startPos: Position, endPos: Position): Promise { const pythonSettings = PythonSettings.getInstance(vscode.Uri.file(refactorTargetFile)); const rangeOfTextToExtract = new vscode.Range(startPos, endPos); const proxy = new RefactorProxy(EXTENSION_DIR, pythonSettings, path.dirname(refactorTargetFile), ioc.serviceContainer); - let expectedTextEdits: vscode.TextEdit[]; - let ignoreErrorHandling = false; - let mockTextDoc: vscode.TextDocument; + const DIFF = '--- a/refactor.py\n+++ b/refactor.py\n@@ -232,7 +232,8 @@\n sys.stdout.flush()\n \n def watch(self):\n- self._write_response("STARTED")\n+ myNewVariable = "STARTED"\n+ self._write_response(myNewVariable)\n while True:\n try:\n self._process_request(self._input.readline())\n'; - return new Promise((resolve, reject) => { - vscode.workspace.openTextDocument(refactorTargetFile).then(textDocument => { - mockTextDoc = textDocument; - expectedTextEdits = getTextEditsFromPatch(textDocument.getText(), DIFF); - resolve(); - }, reject); - }) - .then(() => proxy.extractVariable(mockTextDoc, 'myNewVariable', refactorTargetFile, rangeOfTextToExtract, options)) - .then(response => { - if (shouldError) { - ignoreErrorHandling = true; - assert.fail(null, null, 'Extraction should fail with an error', ''); - } - const textEdits = getTextEditsFromPatch(mockTextDoc.getText(), DIFF); - assert.equal(response.results.length, 1, 'Invalid number of items in response'); - assert.equal(textEdits.length, expectedTextEdits.length, 'Invalid number of Text Edits'); - textEdits.forEach(edit => { - const foundEdit = expectedTextEdits.filter(item => item.newText === edit.newText && item.range.isEqual(edit.range)); - assert.equal(foundEdit.length, 1, 'Edit not found'); - }); - }).catch((error: any) => { - if (ignoreErrorHandling) { - return Promise.reject(error!); - } - if (shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - return; - } - - return Promise.reject(error!); + const mockTextDoc = await vscode.workspace.openTextDocument(refactorTargetFile); + const expectedTextEdits = getTextEditsFromPatch(mockTextDoc.getText(), DIFF); + try { + const response = await proxy.extractVariable(mockTextDoc, 'myNewVariable', refactorTargetFile, rangeOfTextToExtract, options); + if (shouldError) { + assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); + } + const textEdits = getTextEditsFromPatch(mockTextDoc.getText(), DIFF); + assert.equal(response.results.length, 1, 'Invalid number of items in response'); + assert.equal(textEdits.length, expectedTextEdits.length, 'Invalid number of Text Edits'); + textEdits.forEach(edit => { + const foundEdit = expectedTextEdits.filter(item => item.newText === edit.newText && item.range.isEqual(edit.range)); + assert.equal(foundEdit.length, 1, 'Edit not found'); }); + } catch (error) { + if (!shouldError) { + assert.equal('Error', 'No error', `${error}`); + } + } } test('Extract Variable', async () => { @@ -110,58 +95,33 @@ suite('Variable Extraction', () => { await testingVariableExtraction(true, startPos, endPos); }); - function testingVariableExtractionEndToEnd(shouldError: boolean, startPos: Position, endPos: Position) { + async function testingVariableExtractionEndToEnd(shouldError: boolean, startPos: Position, endPos: Position): Promise { const ch = new MockOutputChannel('Python'); - let textDocument: vscode.TextDocument; - let textEditor: 
vscode.TextEditor; const rangeOfTextToExtract = new vscode.Range(startPos, endPos); - let ignoreErrorHandling = false; - return vscode.workspace.openTextDocument(refactorTargetFile).then(document => { - textDocument = document; - return vscode.window.showTextDocument(textDocument); - }).then(editor => { - assert(vscode.window.activeTextEditor, 'No active editor'); - editor.selections = [new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end)]; - editor.selection = new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end); - textEditor = editor; - return; - }).then(() => { - return extractVariable(EXTENSION_DIR, textEditor, rangeOfTextToExtract, ch, ioc.serviceContainer).then(() => { - if (shouldError) { - ignoreErrorHandling = true; - assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); - } - return textEditor.document.save(); - }).then(() => { - assert.equal(ch.output.length, 0, 'Output channel is not empty'); - assert.equal(textDocument.lineAt(234).text.trim().indexOf('newvariable'), 0, 'New Variable not created'); - assert.equal(textDocument.lineAt(234).text.trim().endsWith('= "STARTED"'), true, 'Started Text Assigned to variable'); - assert.equal(textDocument.lineAt(235).text.indexOf('(newvariable') >= 0, true, 'New Variable not being used'); - }).catch((error: any) => { - if (ignoreErrorHandling) { - return Promise.reject(error!); - } - if (shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - return; - } - - return Promise.reject(error)!; - }); - }, error => { - if (ignoreErrorHandling) { - return Promise.reject(error); - } + + const textDocument = await vscode.workspace.openTextDocument(refactorTargetFile); + const editor = await vscode.window.showTextDocument(textDocument); + + editor.selections = [new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end)]; + editor.selection = new vscode.Selection(rangeOfTextToExtract.start, rangeOfTextToExtract.end); + try { + await extractVariable(EXTENSION_DIR, editor, rangeOfTextToExtract, ch, ioc.serviceContainer); if (shouldError) { - // Wait a minute this shouldn't work, what's going on - assert.equal(true, true, 'Error raised as expected'); - } else { - // tslint:disable-next-line:prefer-template restrict-plus-operands - assert.fail(error + '', null, 'Variable extraction failed\n' + ch.output, ''); - return Promise.reject(error); + assert.fail('No error', 'Error', 'Extraction should fail with an error', ''); } - }); + assert.equal(ch.output.length, 0, 'Output channel is not empty'); + + const newVarDefLine = textDocument.lineAt(editor.selection.start); + const newVarRefLine = textDocument.lineAt(newVarDefLine.lineNumber + 1); + + assert.equal(newVarDefLine.text.trim().indexOf('newvariable'), 0, 'New Variable not created'); + assert.equal(newVarDefLine.text.trim().endsWith('= "STARTED"'), true, 'Started Text Assigned to variable'); + assert.equal(newVarRefLine.text.indexOf('(newvariable') >= 0, true, 'New Variable not being used'); + } catch (error) { + if (!shouldError) { + assert.fail('Error', 'No error', `${error}`); + } + } } // This test fails on linux (text document not getting updated in time)
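
The DIFF constants these tests compare against are ordinary unified diffs over refactor.py. A small sketch (not part of the patch; the before/after snippets are hypothetical) of how an equivalent expectation could be generated with the standard library instead of maintaining the string by hand:

import difflib

before = '    def watch(self):\n        self._write_response("STARTED")\n'
after = ('    def watch(self):\n'
         '        myNewVariable = "STARTED"\n'
         '        self._write_response(myNewVariable)\n')

# splitlines(keepends=True) preserves newlines, so the joined output
# is a ready-to-compare unified diff like the DIFF constants above.
diff = difflib.unified_diff(
    before.splitlines(keepends=True), after.splitlines(keepends=True),
    fromfile='a/refactor.py', tofile='b/refactor.py')
print(''.join(diff))
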