
Commit

Merge branch 'master' into import-cycle
* master:
  New files shouldn't trigger a coarse-grained rebuild in fg cache mode (python#4669)
  Bump version to 0.580-dev
  Update revision history for 0.570 (python#4662)
  Fine-grained: Fix crashes when refreshing synthetic types (python#4667)
  Fine-grained: Support NewType and reset subtype caches (python#4656)
  Fine-grained: Detect changes in additional TypeInfo attributes (python#4659)
  Fine-grained: Apply semantic analyzer patch callbacks (python#4658)
  Optimize fine-grained update by using Graph as the cache (python#4622)
  Cleanup check_reverse_op_method (python#4017)
  Fine-grained: Fix AST merge issues (python#4652)
  Optionally check that we don't have duplicate nodes after AST merge (python#4647)
carljm committed Mar 6, 2018
2 parents 2a37e80 + ac90292 commit 8a222da
Showing 33 changed files with 1,283 additions and 393 deletions.
5 changes: 5 additions & 0 deletions docs/source/revision_history.rst
@@ -3,6 +3,11 @@ Revision history

List of major changes:

- March 2018
* Publish ``mypy`` version 0.570 on PyPI.

* Add support for :ref:`attrs_package`.

- December 2017
* Publish ``mypy`` version 0.560 on PyPI.

99 changes: 62 additions & 37 deletions mypy/build.py
@@ -35,7 +35,7 @@

from mypy.nodes import (MODULE_REF, MypyFile, Node, ImportBase, Import, ImportFrom, ImportAll)
from mypy.semanal_pass1 import SemanticAnalyzerPass1
from mypy.semanal import SemanticAnalyzerPass2
from mypy.semanal import SemanticAnalyzerPass2, apply_semantic_analyzer_patches
from mypy.semanal_pass3 import SemanticAnalyzerPass3
from mypy.checker import TypeChecker
from mypy.indirection import TypeIndirectionVisitor
@@ -389,7 +389,6 @@ def default_lib_path(data_dir: str,
CacheMeta = NamedTuple('CacheMeta',
[('id', str),
('path', str),
('memory_only', bool), # no corresponding json files (fine-grained only)
('mtime', int),
('size', int),
('hash', str),
Expand All @@ -415,7 +414,6 @@ def cache_meta_from_dict(meta: Dict[str, Any], data_json: str) -> CacheMeta:
return CacheMeta(
meta.get('id', sentinel),
meta.get('path', sentinel),
meta.get('memory_only', False),
int(meta['mtime']) if 'mtime' in meta else sentinel,
meta.get('size', sentinel),
meta.get('hash', sentinel),
@@ -569,7 +567,7 @@ class BuildManager:
plugin: Active mypy plugin(s)
errors: Used for reporting all errors
flush_errors: A function for processing errors after each SCC
saved_cache: Dict with saved cache state for dmypy and fine-grained incremental mode
saved_cache: Dict with saved cache state for coarse-grained dmypy
(read-write!)
stats: Dict with various instrumentation numbers
"""
@@ -590,6 +588,7 @@ def __init__(self, data_dir: str,
self.data_dir = data_dir
self.errors = errors
self.errors.set_ignore_prefix(ignore_prefix)
self.only_load_from_cache = options.use_fine_grained_cache
self.lib_path = tuple(lib_path)
self.source_set = source_set
self.reports = reports
@@ -626,6 +625,8 @@ def all_imported_modules_in_file(self,
Return list of tuples (priority, module id, import line number)
for all modules imported in file; lower numbers == higher priority.
Can generate blocking errors on bogus relative imports.
"""

def correct_rel_imp(imp: Union[ImportFrom, ImportAll]) -> str:
@@ -640,6 +641,12 @@ def correct_rel_imp(imp: Union[ImportFrom, ImportAll]) -> str:
file_id = ".".join(file_id.split(".")[:-rel])
new_id = file_id + "." + imp.id if imp.id else file_id

if not new_id:
self.errors.set_file(file.path, file.name())
self.errors.report(imp.line, 0,
"No parent module -- cannot perform relative import",
blocker=True)

return new_id

res = [] # type: List[Tuple[int, str, int]]
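
For context on the check added above: the error fires when the resolved module id comes out empty, i.e. a relative import climbed past the top-level package. A simplified, standalone sketch of the resolution rule (illustrative only, not the literal correct_rel_imp code):

def resolve_relative(file_id: str, rel: int, target: str) -> str:
    # Drop `rel` trailing components from the importing module's id, then
    # append the imported module name (if any).
    parts = file_id.split(".")
    base = ".".join(parts[:-rel]) if rel else file_id
    if rel and not base:
        return ""  # climbed past the top-level package -> blocking error above
    return base + "." + target if target else base

# resolve_relative("pkg.sub.mod", 1, "other") == "pkg.sub.other"
# resolve_relative("mod", 1, "") == ""   -> "No parent module -- cannot perform relative import"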
@@ -1129,12 +1136,6 @@ def validate_meta(meta: Optional[CacheMeta], id: str, path: Optional[str],
manager.log('Metadata abandoned for {}: errors were previously ignored'.format(id))
return None

if meta.memory_only:
# Special case for fine-grained incremental mode when the JSON file is missing but
# we want to cache the module anyway.
manager.log('Memory-only metadata for {}'.format(id))
return meta

assert path is not None, "Internal error: meta was provided without a path"
# Check data_json; assume if its mtime matches it's good.
# TODO: stat() errors
@@ -1623,7 +1624,8 @@ def __init__(self,
self.ignore_all = True
else:
# In 'error' mode, produce special error messages.
manager.log("Skipping %s (%s)" % (path, id))
if id not in manager.missing_modules:
manager.log("Skipping %s (%s)" % (path, id))
if follow_imports == 'error':
if ancestor_for:
self.skipping_ancestor(id, path, ancestor_for)
@@ -1673,9 +1675,16 @@ def __init__(self,
for id, line in zip(self.meta.dependencies, self.meta.dep_lines)}
self.child_modules = set(self.meta.child_modules)
else:
# In fine-grained cache mode, pretend we only know about modules that
# have cache information and defer handling new modules until the
# fine-grained update.
if manager.only_load_from_cache:
manager.log("Deferring module to fine-grained update %s (%s)" % (path, id))
raise ModuleNotFound

# Parse the file (and then some) to get the dependencies.
self.parse_file()
self.suppressed = []
self.compute_dependencies()
self.child_modules = set()

def skipping_ancestor(self, id: str, path: str, ancestor_for: 'State') -> None:
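
An illustrative sketch of the new control flow when only_load_from_cache is set and a module has no cache entry (the module name and call are made up; the real handling lives in load_graph):

try:
    st = State(id='newmod', path='newmod.py', source=None, manager=manager)
except ModuleNotFound:
    # No cached metadata and we may only load from cache: skip the module for
    # now instead of forcing a coarse-grained rebuild; the fine-grained update
    # will process it later.
    pass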
@@ -1830,6 +1839,8 @@ def fix_suppressed_dependencies(self, graph: Graph) -> None:
"""
# TODO: See if it's possible to move this check directly into parse_file in some way.
# TODO: Find a way to write a test case for this fix.
# TODO: I suspect that splitting compute_dependencies() out from parse_file
# obviates the need for this but lacking a test case for the problem this fixed...
silent_mode = (self.options.ignore_missing_imports or
self.options.follow_imports == 'skip')
if not silent_mode:
@@ -1896,49 +1907,48 @@ def parse_file(self) -> None:
# TODO: Why can't SemanticAnalyzerPass1 .analyze() do this?
self.tree.names = manager.semantic_analyzer.globals

self.check_blockers()

def compute_dependencies(self) -> None:
"""Compute a module's dependencies after parsing it.
This is used when we parse a file that we didn't have
up-to-date cache information for. When we have an up-to-date
cache, we just use the cached info.
"""
manager = self.manager
assert self.tree is not None

# Compute (direct) dependencies.
# Add all direct imports (this is why we needed the first pass).
# Also keep track of each dependency's source line.
dependencies = []
suppressed = []
priorities = {} # type: Dict[str, int] # id -> priority
dep_line_map = {} # type: Dict[str, int] # id -> line
for pri, id, line in manager.all_imported_modules_in_file(self.tree):
priorities[id] = min(pri, priorities.get(id, PRI_ALL))
if id == self.id:
continue
# Omit missing modules, as otherwise we could not type-check
# programs with missing modules.
if id in manager.missing_modules:
if id not in dep_line_map:
suppressed.append(id)
dep_line_map[id] = line
continue
if id == '':
# Must be from a relative import.
manager.errors.set_file(self.xpath, self.id)
manager.errors.report(line, 0,
"No parent module -- cannot perform relative import",
blocker=True)
continue
if id not in dep_line_map:
dependencies.append(id)
dep_line_map[id] = line
# Every module implicitly depends on builtins.
if self.id != 'builtins' and 'builtins' not in dep_line_map:
dependencies.append('builtins')

# If self.dependencies is already set, it was read from the
# cache, but for some reason we're re-parsing the file.
# NOTE: What to do about race conditions (like editing the
# file while mypy runs)? A previous version of this code
# explicitly checked for this, but ran afoul of other reasons
# for differences (e.g. silent mode).

# Missing dependencies will be moved from dependencies to
# suppressed when they fail to be loaded in load_graph.
self.dependencies = dependencies
self.suppressed = suppressed
self.suppressed = []
self.priorities = priorities
self.dep_line_map = dep_line_map
self.check_blockers()

self.check_blockers() # Can fail due to bogus relative imports

def semantic_analysis(self) -> None:
assert self.tree is not None, "Internal error: method must be called on parsed file only"
Expand All @@ -1958,9 +1968,7 @@ def semantic_analysis_pass_three(self) -> None:
self.patches = patches + self.patches

def semantic_analysis_apply_patches(self) -> None:
patches_by_priority = sorted(self.patches, key=lambda x: x[0])
for priority, patch_func in patches_by_priority:
patch_func()
apply_semantic_analyzer_patches(self.patches)

def type_check_first_pass(self) -> None:
if self.options.semantic_analysis_only:
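
The inline sort-and-apply loop removed here is replaced by a helper imported from mypy.semanal (see the import change near the top of this diff). Going by the removed lines, a minimal sketch of what that helper presumably does:

from typing import Callable, List, Tuple

def apply_semantic_analyzer_patches(patches: List[Tuple[int, Callable[[], None]]]) -> None:
    # Patches are (priority, callback) pairs collected during semantic
    # analysis; run them in ascending priority order.
    for _priority, patch_func in sorted(patches, key=lambda x: x[0]):
        patch_func()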
@@ -1992,7 +2000,9 @@ def finish_passes(self) -> None:
return
with self.wrap_context():
# Some tests want to look at the set of all types.
if manager.options.use_builtins_fixtures or manager.options.dump_deps:
options = manager.options
if ((options.use_builtins_fixtures and not options.fine_grained_incremental) or
manager.options.dump_deps):
manager.all_types.update(self.type_map())

if self.options.incremental:
@@ -2091,6 +2101,15 @@ def dispatch(sources: List[BuildSource], manager: BuildManager) -> Graph:
manager.log("Mypy version %s" % __version__)
t0 = time.time()
graph = load_graph(sources, manager)

# This is a kind of unfortunate hack to work around some of fine-grained's
# fragility: if we have loaded less than 50% of the specified files from
# cache in fine-grained cache mode, load the graph again honestly.
if manager.options.use_fine_grained_cache and len(graph) < 0.50 * len(sources):
manager.log("Redoing load_graph because too much was missing")
manager.only_load_from_cache = False
graph = load_graph(sources, manager)

t1 = time.time()
manager.add_stats(graph_size=len(graph),
stubs_found=sum(g.path is not None and g.path.endswith('.pyi')
@@ -2193,13 +2212,19 @@ def dump_graph(graph: Graph) -> None:
print("[" + ",\n ".join(node.dumps() for node in nodes) + "\n]")


def load_graph(sources: List[BuildSource], manager: BuildManager) -> Graph:
def load_graph(sources: List[BuildSource], manager: BuildManager,
old_graph: Optional[Graph] = None) -> Graph:
"""Given some source files, load the full dependency graph.
If an old_graph is passed in, it is used as the starting point and
modified during graph loading.
As this may need to parse files, this can raise CompileError in case
there are syntax errors.
"""
graph = {} # type: Graph

graph = old_graph if old_graph is not None else {} # type: Graph

# The deque is used to implement breadth-first traversal.
# TODO: Consider whether to go depth-first instead. This may
# affect the order in which we process files within import cycles.
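
A hedged sketch of how the new old_graph parameter is meant to be used: presumably the fine-grained/daemon machinery passes in the graph from a previous run so that newly appearing modules are grafted onto it rather than rebuilt from scratch (the call site and names below are illustrative, not taken from this diff):

# Illustrative only: reuse an existing Graph when new sources show up.
extra_sources = [BuildSource(path='newmod.py', module='newmod', text=None)]
graph = load_graph(extra_sources, manager, old_graph=graph)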