From 29a4ec0d434dba190dfe9e4d46f16e4c026dd6cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Thu, 15 Mar 2018 10:03:36 +0100 Subject: [PATCH 1/2] Make queries thread safe. Remove the query stack and make queries point to their parents instead. --- src/librustc/ty/context.rs | 182 +++++++++---- src/librustc/ty/maps/job.rs | 91 +++++++ src/librustc/ty/maps/mod.rs | 10 +- src/librustc/ty/maps/on_disk_cache.rs | 9 + src/librustc/ty/maps/plumbing.rs | 351 +++++++++++++++++--------- src/librustc_errors/lib.rs | 25 +- 6 files changed, 486 insertions(+), 182 deletions(-) create mode 100644 src/librustc/ty/maps/job.rs diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 966c96e594fc4..adb0ecd39846a 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -1222,7 +1222,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { Lrc::new(StableVec::new(v))); } - tls::enter_global(GlobalCtxt { + let gcx = &GlobalCtxt { sess: s, cstore, global_arenas: &arenas.global, @@ -1263,7 +1263,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { all_traits: RefCell::new(None), tx_to_llvm_workers: tx, output_filenames: Arc::new(output_filenames.clone()), - }, f) + }; + + tls::enter_global(gcx, f) } pub fn consider_optimizing String>(&self, msg: T) -> bool { @@ -1487,11 +1489,25 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> { /// Call the closure with a local `TyCtxt` using the given arena. - pub fn enter_local(&self, arena: &'tcx DroplessArena, f: F) -> R + pub fn enter_local(&self, + arena: &'tcx DroplessArena, + f: F) -> R where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R { let interners = CtxtInterners::new(arena); - tls::enter(self, &interners, f) + let tcx = TyCtxt { + gcx: self, + interners: &interners, + }; + ty::tls::with_related_context(tcx.global_tcx(), |icx| { + let new_icx = ty::tls::ImplicitCtxt { + tcx, + query: icx.query.clone(), + }; + ty::tls::enter_context(&new_icx, |new_icx| { + f(new_icx.tcx) + }) + }) } } @@ -1638,21 +1654,34 @@ impl<'a, 'tcx> Lift<'tcx> for &'a Slice> { } pub mod tls { - use super::{CtxtInterners, GlobalCtxt, TyCtxt}; + use super::{GlobalCtxt, TyCtxt}; use std::cell::Cell; use std::fmt; + use std::mem; use syntax_pos; + use ty::maps; + use errors::{Diagnostic, TRACK_DIAGNOSTICS}; + use rustc_data_structures::OnDrop; + use rustc_data_structures::sync::Lrc; + + #[derive(Clone)] + pub struct ImplicitCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + pub tcx: TyCtxt<'a, 'gcx, 'tcx>, + pub query: Option>>, + } + + thread_local!(static TLV: Cell = Cell::new(0)); - /// Marker types used for the scoped TLS slot. - /// The type context cannot be used directly because the scoped TLS - /// in libstd doesn't allow types generic over lifetimes. - enum ThreadLocalGlobalCtxt {} - enum ThreadLocalInterners {} + fn set_tlv R, R>(value: usize, f: F) -> R { + let old = get_tlv(); + let _reset = OnDrop(move || TLV.with(|tlv| tlv.set(old))); + TLV.with(|tlv| tlv.set(value)); + f() + } - thread_local! 
{ - static TLS_TCX: Cell> = Cell::new(None) + fn get_tlv() -> usize { + TLV.with(|tlv| tlv.get()) } fn span_debug(span: syntax_pos::Span, f: &mut fmt::Formatter) -> fmt::Result { @@ -1661,59 +1690,120 @@ pub mod tls { }) } - pub fn enter_global<'gcx, F, R>(gcx: GlobalCtxt<'gcx>, f: F) -> R - where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'gcx>) -> R + fn track_diagnostic(diagnostic: &Diagnostic) { + with_context(|context| { + if let Some(ref query) = context.query { + query.diagnostics.lock().push(diagnostic.clone()); + } + }) + } + + pub fn with_thread_locals(f: F) -> R + where F: FnOnce() -> R { syntax_pos::SPAN_DEBUG.with(|span_dbg| { let original_span_debug = span_dbg.get(); span_dbg.set(span_debug); - let result = enter(&gcx, &gcx.global_interners, f); - span_dbg.set(original_span_debug); - result + + let _on_drop = OnDrop(move || { + span_dbg.set(original_span_debug); + }); + + TRACK_DIAGNOSTICS.with(|current| { + let original = current.get(); + current.set(track_diagnostic); + + let _on_drop = OnDrop(move || { + current.set(original); + }); + + f() + }) }) } - pub fn enter<'a, 'gcx: 'tcx, 'tcx, F, R>(gcx: &'a GlobalCtxt<'gcx>, - interners: &'a CtxtInterners<'tcx>, - f: F) -> R - where F: FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R + pub fn enter_global<'gcx, F, R>(gcx: &GlobalCtxt<'gcx>, f: F) -> R + where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'gcx>) -> R { - let gcx_ptr = gcx as *const _ as *const ThreadLocalGlobalCtxt; - let interners_ptr = interners as *const _ as *const ThreadLocalInterners; - TLS_TCX.with(|tls| { - let prev = tls.get(); - tls.set(Some((gcx_ptr, interners_ptr))); - let ret = f(TyCtxt { + with_thread_locals(|| { + let tcx = TyCtxt { gcx, - interners, - }); - tls.set(prev); - ret + interners: &gcx.global_interners, + }; + let icx = ImplicitCtxt { + tcx, + query: None, + }; + enter_context(&icx, |_| { + f(tcx) + }) + }) + } + + pub fn enter_context<'a, 'gcx: 'tcx, 'tcx, F, R>(context: &ImplicitCtxt<'a, 'gcx, 'tcx>, + f: F) -> R + where F: FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R + { + set_tlv(context as *const _ as usize, || { + f(&context) + }) + } + + pub fn with_context_opt(f: F) -> R + where F: for<'a, 'gcx, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'gcx, 'tcx>>) -> R + { + let context = get_tlv(); + if context == 0 { + f(None) + } else { + unsafe { f(Some(&*(context as *const ImplicitCtxt))) } + } + } + + pub fn with_fully_related_context<'a, 'gcx, 'tcx, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx>, f: F) -> R + where F: for<'b> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx>) -> R + { + with_context(|context| { + unsafe { + let gcx = tcx.gcx as *const _ as usize; + let interners = tcx.interners as *const _ as usize; + assert!(context.tcx.gcx as *const _ as usize == gcx); + assert!(context.tcx.interners as *const _ as usize == interners); + let context: &ImplicitCtxt = mem::transmute(context); + f(context) + } }) } + pub fn with_related_context<'a, 'gcx, 'tcx1, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx1>, f: F) -> R + where F: for<'b, 'tcx2> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx2>) -> R + { + with_context(|context| { + unsafe { + let gcx = tcx.gcx as *const _ as usize; + assert!(context.tcx.gcx as *const _ as usize == gcx); + let context: &ImplicitCtxt = mem::transmute(context); + f(context) + } + }) + } + + pub fn with_context(f: F) -> R + where F: for<'a, 'gcx, 'tcx> FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R + { + with_context_opt(|opt_context| f(opt_context.expect("no ImplicitCtxt stored in tls"))) + } + pub fn with(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R { - 
TLS_TCX.with(|tcx| { - let (gcx, interners) = tcx.get().unwrap(); - let gcx = unsafe { &*(gcx as *const GlobalCtxt) }; - let interners = unsafe { &*(interners as *const CtxtInterners) }; - f(TyCtxt { - gcx, - interners, - }) - }) + with_context(|context| f(context.tcx)) } pub fn with_opt(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(Option>) -> R { - if TLS_TCX.with(|tcx| tcx.get().is_some()) { - with(|v| f(Some(v))) - } else { - f(None) - } + with_context_opt(|opt_context| f(opt_context.map(|context| context.tcx))) } } diff --git a/src/librustc/ty/maps/job.rs b/src/librustc/ty/maps/job.rs new file mode 100644 index 0000000000000..2ed993bd975e5 --- /dev/null +++ b/src/librustc/ty/maps/job.rs @@ -0,0 +1,91 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(warnings)] + +use std::mem; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; +use rustc_data_structures::sync::{Lock, LockGuard, Lrc}; +use syntax_pos::Span; +use ty::tls; +use ty::maps::Query; +use ty::maps::plumbing::CycleError; +use ty::context::TyCtxt; +use errors::Diagnostic; +use std::process; +use std::fmt; +use std::sync::{Arc, Mutex}; +use std::collections::HashSet; + +pub struct PoisonedJob; + +#[derive(Clone, Debug)] +pub struct StackEntry<'tcx> { + pub span: Span, + pub query: Query<'tcx>, +} + +pub struct QueryJob<'tcx> { + pub entry: StackEntry<'tcx>, + pub parent: Option>>, + pub track_diagnostics: bool, + pub diagnostics: Lock>, +} + +impl<'tcx> QueryJob<'tcx> { + pub fn new( + entry: StackEntry<'tcx>, + track_diagnostics: bool, + parent: Option>>, + ) -> Self { + QueryJob { + track_diagnostics, + diagnostics: Lock::new(Vec::new()), + entry, + parent, + } + } + + pub(super) fn await<'lcx>( + &self, + tcx: TyCtxt<'_, 'tcx, 'lcx>, + span: Span, + ) -> Result<(), CycleError<'tcx>> { + // The query is already executing, so this must be a cycle for single threaded rustc, + // so we find the cycle and return it + + let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone()); + let mut cycle = Vec::new(); + + while let Some(job) = current_job { + cycle.insert(0, job.entry.clone()); + + if &*job as *const _ == self as *const _ { + break; + } + + current_job = job.parent.clone(); + } + + Err(CycleError { span, cycle }) + } + + pub fn signal_complete(&self) { + // Signals to waiters that the query is complete. 
+ // This is a no-op for single threaded rustc + } +} + +pub(super) enum QueryResult<'tcx, T> { + Started(Lrc>), + Complete(T), + Poisoned, +} diff --git a/src/librustc/ty/maps/mod.rs b/src/librustc/ty/maps/mod.rs index c16ad0d8ca140..6dd31baa5931b 100644 --- a/src/librustc/ty/maps/mod.rs +++ b/src/librustc/ty/maps/mod.rs @@ -14,7 +14,7 @@ use hir::def_id::{CrateNum, DefId, DefIndex}; use hir::def::{Def, Export}; use hir::{self, TraitCandidate, ItemLocalId, TransFnAttrs}; use hir::svh::Svh; -use infer::canonical::{Canonical, QueryResult}; +use infer::canonical::{self, Canonical}; use lint; use middle::borrowck::BorrowCheckResult; use middle::cstore::{ExternCrate, LinkagePreference, NativeLibrary, @@ -66,6 +66,10 @@ mod plumbing; use self::plumbing::*; pub use self::plumbing::force_from_dep_node; +mod job; +pub use self::job::{QueryJob, StackEntry, PoisonedJob}; +use self::job::QueryResult; + mod keys; pub use self::keys::Key; @@ -399,7 +403,7 @@ define_maps! { <'tcx> [] fn normalize_projection_ty: NormalizeProjectionTy( CanonicalProjectionGoal<'tcx> ) -> Result< - Lrc>>>, + Lrc>>>, NoSolution, >, @@ -412,7 +416,7 @@ define_maps! { <'tcx> [] fn dropck_outlives: DropckOutlives( CanonicalTyGoal<'tcx> ) -> Result< - Lrc>>>, + Lrc>>>, NoSolution, >, diff --git a/src/librustc/ty/maps/on_disk_cache.rs b/src/librustc/ty/maps/on_disk_cache.rs index c103d6e015aa4..4d78703613e94 100644 --- a/src/librustc/ty/maps/on_disk_cache.rs +++ b/src/librustc/ty/maps/on_disk_cache.rs @@ -30,6 +30,7 @@ use syntax::codemap::{CodeMap, StableFilemapId}; use syntax_pos::{BytePos, Span, DUMMY_SP, FileMap}; use syntax_pos::hygiene::{Mark, SyntaxContext, ExpnInfo}; use ty; +use ty::maps::job::QueryResult; use ty::codec::{self as ty_codec, TyDecoder, TyEncoder}; use ty::context::TyCtxt; @@ -239,6 +240,10 @@ impl<'sess> OnDiskCache<'sess> { for (key, entry) in const_eval::get_cache_internal(tcx).map.iter() { use ty::maps::config::QueryDescription; if const_eval::cache_on_disk(key.clone()) { + let entry = match *entry { + QueryResult::Complete(ref v) => v, + _ => panic!("incomplete query"), + }; if let Ok(ref value) = entry.value { let dep_node = SerializedDepNodeIndex::new(entry.index.index()); @@ -1109,6 +1114,10 @@ fn encode_query_results<'enc, 'a, 'tcx, Q, E>(tcx: TyCtxt<'a, 'tcx, 'tcx>, { for (key, entry) in Q::get_cache_internal(tcx).map.iter() { if Q::cache_on_disk(key.clone()) { + let entry = match *entry { + QueryResult::Complete(ref v) => v, + _ => panic!("incomplete query"), + }; let dep_node = SerializedDepNodeIndex::new(entry.index.index()); // Record position of the cache entry diff --git a/src/librustc/ty/maps/plumbing.rs b/src/librustc/ty/maps/plumbing.rs index 46106d8ec0e91..895bf3d797336 100644 --- a/src/librustc/ty/maps/plumbing.rs +++ b/src/librustc/ty/maps/plumbing.rs @@ -15,19 +15,18 @@ use dep_graph::{DepNodeIndex, DepNode, DepKind, DepNodeColor}; use errors::DiagnosticBuilder; use ty::{TyCtxt}; -use ty::maps::Query; // NB: actually generated by the macros in this file use ty::maps::config::QueryDescription; +use ty::maps::job::{QueryResult, StackEntry}; use ty::item_path; use rustc_data_structures::fx::{FxHashMap}; -use std::cell::{Ref, RefMut}; +use rustc_data_structures::sync::LockGuard; use std::marker::PhantomData; -use std::mem; use syntax_pos::Span; pub(super) struct QueryMap<'tcx, D: QueryDescription<'tcx>> { phantom: PhantomData<(D, &'tcx ())>, - pub(super) map: FxHashMap>, + pub(super) map: FxHashMap>>, } pub(super) struct QueryValue { @@ -57,23 +56,19 @@ impl<'tcx, M: 
QueryDescription<'tcx>> QueryMap<'tcx, M> { pub(super) trait GetCacheInternal<'tcx>: QueryDescription<'tcx> + Sized { fn get_cache_internal<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> Ref<'a, QueryMap<'tcx, Self>>; + -> LockGuard<'a, QueryMap<'tcx, Self>>; } -pub(super) struct CycleError<'a, 'tcx: 'a> { - span: Span, - cycle: RefMut<'a, [(Span, Query<'tcx>)]>, +#[derive(Clone)] +pub(super) struct CycleError<'tcx> { + pub(super) span: Span, + pub(super) cycle: Vec>, } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - pub(super) fn report_cycle(self, CycleError { span, cycle }: CycleError) + pub(super) fn report_cycle(self, CycleError { span, cycle: stack }: CycleError) -> DiagnosticBuilder<'a> { - // Subtle: release the refcell lock before invoking `describe()` - // below by dropping `cycle`. - let stack = cycle.to_vec(); - mem::drop(cycle); - assert!(!stack.is_empty()); // Disable naming impls with types in this path, since that @@ -87,44 +82,21 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { "cyclic dependency detected"); err.span_label(span, "cyclic reference"); - err.span_note(self.sess.codemap().def_span(stack[0].0), - &format!("the cycle begins when {}...", stack[0].1.describe(self))); + err.span_note(self.sess.codemap().def_span(stack[0].span), + &format!("the cycle begins when {}...", stack[0].query.describe(self))); - for &(span, ref query) in &stack[1..] { + for &StackEntry { span, ref query, .. } in &stack[1..] { err.span_note(self.sess.codemap().def_span(span), &format!("...which then requires {}...", query.describe(self))); } err.note(&format!("...which then again requires {}, completing the cycle.", - stack[0].1.describe(self))); + stack[0].query.describe(self))); return err }) } - pub(super) fn cycle_check(self, span: Span, query: Query<'gcx>, compute: F) - -> Result> - where F: FnOnce() -> R - { - { - let mut stack = self.maps.query_stack.borrow_mut(); - if let Some((i, _)) = stack.iter().enumerate().rev() - .find(|&(_, &(_, ref q))| *q == query) { - return Err(CycleError { - span, - cycle: RefMut::map(stack, |stack| &mut stack[i..]) - }); - } - stack.push((span, query)); - } - - let result = compute(); - - self.maps.query_stack.borrow_mut().pop(); - - Ok(result) - } - /// Try to read a node index for the node dep_node. /// A node will have an index, when it's already been marked green, or when we can mark it /// green. This function will mark the current task as a reader of the specified node, when @@ -202,7 +174,11 @@ macro_rules! define_maps { [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => { use dep_graph::DepNodeIndex; - use std::cell::RefCell; + use std::mem; + use errors::Diagnostic; + use errors::FatalError; + use rustc_data_structures::sync::{Lock, LockGuard}; + use rustc_data_structures::OnDrop; define_map_struct! { tcx: $tcx, @@ -214,8 +190,7 @@ macro_rules! define_maps { -> Self { Maps { providers, - query_stack: RefCell::new(vec![]), - $($name: RefCell::new(QueryMap::new())),* + $($name: Lock::new(QueryMap::new())),* } } } @@ -263,7 +238,7 @@ macro_rules! define_maps { impl<$tcx> GetCacheInternal<$tcx> for queries::$name<$tcx> { fn get_cache_internal<'a>(tcx: TyCtxt<'a, $tcx, $tcx>) - -> ::std::cell::Ref<'a, QueryMap<$tcx, Self>> { + -> LockGuard<'a, QueryMap<$tcx, Self>> { tcx.maps.$name.borrow() } } @@ -277,10 +252,47 @@ macro_rules! 
define_maps { DepNode::new(tcx, $node(*key)) } + fn try_get_lock(tcx: TyCtxt<'a, $tcx, 'lcx>, + mut span: Span, + key: &$K) + -> Result>, + Result<($V, DepNodeIndex), CycleError<$tcx>>> + { + loop { + let lock = tcx.maps.$name.borrow_mut(); + let job = if let Some(value) = lock.map.get(key) { + match *value { + QueryResult::Started(ref job) => Some(job.clone()), + QueryResult::Complete(ref value) => { + profq_msg!(tcx, ProfileQueriesMsg::CacheHit); + return Err(Ok(((&value.value).clone(), value.index))); + }, + QueryResult::Poisoned => FatalError.raise(), + } + } else { + None + }; + let job = if let Some(job) = job { + job + } else { + return Ok(lock); + }; + mem::drop(lock); + + if span == DUMMY_SP && stringify!($name) != "def_span" { + span = key.default_span(tcx); + } + + if let Err(cycle) = job.await(tcx, span) { + return Err(Err(cycle)); + } + } + } + fn try_get_with(tcx: TyCtxt<'a, $tcx, 'lcx>, mut span: Span, key: $K) - -> Result<$V, CycleError<'a, $tcx>> + -> Result<$V, CycleError<$tcx>> { debug!("ty::queries::{}::try_get_with(key={:?}, span={:?})", stringify!($name), @@ -294,24 +306,39 @@ macro_rules! define_maps { ) ); - if let Some(value) = tcx.maps.$name.borrow().map.get(&key) { - profq_msg!(tcx, ProfileQueriesMsg::CacheHit); - tcx.dep_graph.read_index(value.index); - return Ok((&value.value).clone()); + macro_rules! get_lock { + () => {{ + match Self::try_get_lock(tcx, span, &key) { + Ok(lock) => lock, + Err(result) => { + return result.map(|(v, index)| { + tcx.dep_graph.read_index(index); + v + }); + }, + } + }} } + let mut lock = get_lock!(); + // FIXME(eddyb) Get more valid Span's on queries. // def_span guard is necessary to prevent a recursive loop, // default_span calls def_span query internally. if span == DUMMY_SP && stringify!($name) != "def_span" { - span = key.default_span(tcx) + // This might deadlock if we hold the map lock since we might be + // waiting for the def_span query and switch to some other fiber + // So we drop the lock here and reacquire it + mem::drop(lock); + span = key.default_span(tcx); + lock = get_lock!(); } // Fast path for when incr. comp. is off. `to_dep_node` is // expensive for some DepKinds. if !tcx.dep_graph.is_fully_enabled() { let null_dep_node = DepNode::new_no_params(::dep_graph::DepKind::Null); - return Self::force(tcx, key, span, null_dep_node) + return Self::force_with_lock(tcx, key, span, lock, null_dep_node) .map(|(v, _)| v); } @@ -320,34 +347,36 @@ macro_rules! 
define_maps { if dep_node.kind.is_anon() { profq_msg!(tcx, ProfileQueriesMsg::ProviderBegin); - let res = tcx.cycle_check(span, Query::$name(key), || { - tcx.sess.diagnostic().track_diagnostics(|| { - tcx.dep_graph.with_anon_task(dep_node.kind, || { - Self::compute_result(tcx.global_tcx(), key) - }) + let res = Self::start_job(tcx, span, key, lock, |tcx| { + tcx.dep_graph.with_anon_task(dep_node.kind, || { + Self::compute_result(tcx.global_tcx(), key) }) })?; profq_msg!(tcx, ProfileQueriesMsg::ProviderEnd); - let ((result, dep_node_index), diagnostics) = res; + let (((result, dep_node_index), diagnostics), job) = res; tcx.dep_graph.read_index(dep_node_index); tcx.on_disk_query_result_cache .store_diagnostics_for_anon_node(dep_node_index, diagnostics); - let value = QueryValue::new(result, dep_node_index); + let value = QueryValue::new(Clone::clone(&result), dep_node_index); + + tcx.maps + .$name + .borrow_mut() + .map + .insert(key, QueryResult::Complete(value)); - return Ok((&tcx.maps - .$name - .borrow_mut() - .map - .entry(key) - .or_insert(value) - .value).clone()); + job.signal_complete(); + + return Ok(result); } if !dep_node.kind.is_input() { + // try_mark_green_and_read may force queries. So we must drop our lock here + mem::drop(lock); if let Some(dep_node_index) = tcx.try_mark_green_and_read(&dep_node) { profq_msg!(tcx, ProfileQueriesMsg::CacheHit); return Self::load_from_disk_and_cache_in_memory(tcx, @@ -356,9 +385,10 @@ macro_rules! define_maps { dep_node_index, &dep_node) } + lock = get_lock!(); } - match Self::force(tcx, key, span, dep_node) { + match Self::force_with_lock(tcx, key, span, lock, dep_node) { Ok((result, dep_node_index)) => { tcx.dep_graph.read_index(dep_node_index); Ok(result) @@ -391,6 +421,63 @@ macro_rules! define_maps { } } + fn start_job(tcx: TyCtxt<'_, $tcx, 'lcx>, + span: Span, + key: $K, + mut map: LockGuard<'_, QueryMap<$tcx, Self>>, + compute: F) + -> Result<((R, Vec), Lrc>), CycleError<$tcx>> + where F: for<'b> FnOnce(TyCtxt<'b, $tcx, 'lcx>) -> R + { + let query = Query::$name(Clone::clone(&key)); + + let entry = StackEntry { + span, + query, + }; + + let (r, job) = ty::tls::with_related_context(tcx, move |icx| { + let job = Lrc::new(QueryJob::new(entry, true, icx.query.clone())); + + map.map.entry(key).or_insert(QueryResult::Started(job.clone())); + + mem::drop(map); + + let r = { + let on_drop = OnDrop(|| { + // Poison the query so jobs waiting on it panics + tcx.maps + .$name + .borrow_mut() + .map + .insert(key, QueryResult::Poisoned); + // Also signal the completion of the job, so waiters + // will continue execution + job.signal_complete(); + }); + + let icx = ty::tls::ImplicitCtxt { + tcx, + query: Some(job.clone()), + }; + + let r = ty::tls::enter_context(&icx, |icx| { + compute(icx.tcx) + }); + + mem::forget(on_drop); + + r + }; + + (r, job) + }); + + let diagnostics: Vec<_> = mem::replace(&mut *job.diagnostics.lock(), Vec::new()); + + Ok(((r, diagnostics), job)) + } + fn compute_result(tcx: TyCtxt<'a, $tcx, 'lcx>, key: $K) -> $V { let provider = tcx.maps.providers[key.map_crate()].$name; provider(tcx.global_tcx(), key) @@ -401,8 +488,11 @@ macro_rules! 
define_maps { span: Span, dep_node_index: DepNodeIndex, dep_node: &DepNode) - -> Result<$V, CycleError<'a, $tcx>> + -> Result<$V, CycleError<$tcx>> { + // Note this function can be called concurrently from the same query + // We must ensure that this is handled correctly + debug_assert!(tcx.dep_graph.is_green(dep_node)); // First we try to load the result from the on-disk cache @@ -425,24 +515,27 @@ macro_rules! define_maps { None }; - let result = if let Some(result) = result { - result + let (result, job) = if let Some(result) = result { + (result, None) } else { // We could not load a result from the on-disk cache, so // recompute. - let (result, _ ) = tcx.cycle_check(span, Query::$name(key), || { - // The diagnostics for this query have already been - // promoted to the current session during - // try_mark_green(), so we can ignore them here. - tcx.sess.diagnostic().track_diagnostics(|| { - // The dep-graph for this computation is already in - // place - tcx.dep_graph.with_ignore(|| { - Self::compute_result(tcx, key) - }) + + // The diagnostics for this query have already been + // promoted to the current session during + // try_mark_green(), so we can ignore them here. + let ((result, _), job) = Self::start_job(tcx, + span, + key, + tcx.maps.$name.borrow_mut(), + |tcx| { + // The dep-graph for this computation is already in + // place + tcx.dep_graph.with_ignore(|| { + Self::compute_result(tcx, key) }) })?; - result + (result, Some(job)) }; // If -Zincremental-verify-ich is specified, re-hash results from @@ -475,43 +568,67 @@ macro_rules! define_maps { tcx.dep_graph.mark_loaded_from_cache(dep_node_index, true); } - let value = QueryValue::new(result, dep_node_index); + let value = QueryValue::new(Clone::clone(&result), dep_node_index); - Ok((&tcx.maps - .$name - .borrow_mut() - .map - .entry(key) - .or_insert(value) - .value).clone()) + tcx.maps + .$name + .borrow_mut() + .map + .insert(key, QueryResult::Complete(value)); + + job.map(|j| j.signal_complete()); + + Ok(result) } + #[allow(dead_code)] fn force(tcx: TyCtxt<'a, $tcx, 'lcx>, key: $K, span: Span, dep_node: DepNode) - -> Result<($V, DepNodeIndex), CycleError<'a, $tcx>> { + -> Result<($V, DepNodeIndex), CycleError<$tcx>> { + // We may be concurrently trying both execute and force a query + // Ensure that only one of them runs the query + let lock = match Self::try_get_lock(tcx, span, &key) { + Ok(lock) => lock, + Err(result) => return result, + }; + Self::force_with_lock(tcx, + key, + span, + lock, + dep_node) + } + + fn force_with_lock(tcx: TyCtxt<'a, $tcx, 'lcx>, + key: $K, + span: Span, + map: LockGuard<'_, QueryMap<$tcx, Self>>, + dep_node: DepNode) + -> Result<($V, DepNodeIndex), CycleError<$tcx>> { debug_assert!(!tcx.dep_graph.dep_node_exists(&dep_node)); profq_msg!(tcx, ProfileQueriesMsg::ProviderBegin); - let res = tcx.cycle_check(span, Query::$name(key), || { - tcx.sess.diagnostic().track_diagnostics(|| { - if dep_node.kind.is_eval_always() { - tcx.dep_graph.with_eval_always_task(dep_node, - tcx, - key, - Self::compute_result) - } else { - tcx.dep_graph.with_task(dep_node, - tcx, - key, - Self::compute_result) - } - }) + let res = Self::start_job(tcx, + span, + key, + map, + |tcx| { + if dep_node.kind.is_eval_always() { + tcx.dep_graph.with_eval_always_task(dep_node, + tcx, + key, + Self::compute_result) + } else { + tcx.dep_graph.with_task(dep_node, + tcx, + key, + Self::compute_result) + } })?; profq_msg!(tcx, ProfileQueriesMsg::ProviderEnd); - let ((result, dep_node_index), diagnostics) = res; + let (((result, 
dep_node_index), diagnostics), job) = res; if tcx.sess.opts.debugging_opts.query_dep_graph { tcx.dep_graph.mark_loaded_from_cache(dep_node_index, false); @@ -522,16 +639,19 @@ macro_rules! define_maps { .store_diagnostics(dep_node_index, diagnostics); } - let value = QueryValue::new(result, dep_node_index); + let value = QueryValue::new(Clone::clone(&result), dep_node_index); + + tcx.maps + .$name + .borrow_mut() + .map + .insert(key, QueryResult::Complete(value)); + + let job: Lrc = job; + + job.signal_complete(); - Ok(((&tcx.maps - .$name - .borrow_mut() - .map - .entry(key) - .or_insert(value) - .value).clone(), - dep_node_index)) + Ok((result, dep_node_index)) } pub fn try_get(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K) @@ -599,8 +719,7 @@ macro_rules! define_map_struct { input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => { pub struct Maps<$tcx> { providers: IndexVec>, - query_stack: RefCell)>>, - $($(#[$attr])* $name: RefCell>>,)* + $($(#[$attr])* $name: Lock>>,)* } }; } diff --git a/src/librustc_errors/lib.rs b/src/librustc_errors/lib.rs index a25c3668bb13b..23a688636e997 100644 --- a/src/librustc_errors/lib.rs +++ b/src/librustc_errors/lib.rs @@ -42,7 +42,6 @@ use rustc_data_structures::stable_hasher::StableHasher; use std::borrow::Cow; use std::cell::{RefCell, Cell}; -use std::mem; use std::{error, fmt}; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::SeqCst; @@ -269,7 +268,6 @@ pub struct Handler { emitter: RefCell>, continue_after_error: Cell, delayed_span_bug: RefCell>, - tracked_diagnostics: RefCell>>, // This set contains the `DiagnosticId` of all emitted diagnostics to avoid // emitting the same diagnostic with extended help (`--teach`) twice, which @@ -282,6 +280,11 @@ pub struct Handler { emitted_diagnostics: RefCell>, } +fn default_track_diagnostic(_: &Diagnostic) {} + +thread_local!(pub static TRACK_DIAGNOSTICS: Cell = + Cell::new(default_track_diagnostic)); + #[derive(Default)] pub struct HandlerFlags { pub can_emit_warnings: bool, @@ -333,7 +336,6 @@ impl Handler { emitter: RefCell::new(e), continue_after_error: Cell::new(true), delayed_span_bug: RefCell::new(None), - tracked_diagnostics: RefCell::new(None), tracked_diagnostic_codes: RefCell::new(FxHashSet()), emitted_diagnostics: RefCell::new(FxHashSet()), } @@ -629,17 +631,6 @@ impl Handler { } } - pub fn track_diagnostics(&self, f: F) -> (R, Vec) - where F: FnOnce() -> R - { - let prev = mem::replace(&mut *self.tracked_diagnostics.borrow_mut(), - Some(Vec::new())); - let ret = f(); - let diagnostics = mem::replace(&mut *self.tracked_diagnostics.borrow_mut(), prev) - .unwrap(); - (ret, diagnostics) - } - /// `true` if a diagnostic with this code has already been emitted in this handler. 
/// /// Used to suppress emitting the same error multiple times with extended explanation when @@ -651,9 +642,9 @@ impl Handler { fn emit_db(&self, db: &DiagnosticBuilder) { let diagnostic = &**db; - if let Some(ref mut list) = *self.tracked_diagnostics.borrow_mut() { - list.push(diagnostic.clone()); - } + TRACK_DIAGNOSTICS.with(|track_diagnostics| { + track_diagnostics.get()(diagnostic); + }); if let Some(ref code) = diagnostic.code { self.tracked_diagnostic_codes.borrow_mut().insert(code.clone()); From 4f7d0fde1c5f577c1f956d5d4edfbb202a5bc3cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Sat, 24 Mar 2018 06:19:20 +0100 Subject: [PATCH 2/2] Some cleanups and added comments --- src/librustc/ty/context.rs | 92 +++++++++++++++++++++++--------- src/librustc/ty/maps/job.rs | 69 ++++++++++++------------ src/librustc/ty/maps/mod.rs | 2 +- src/librustc/ty/maps/plumbing.rs | 80 ++++++++++++++++++--------- 4 files changed, 157 insertions(+), 86 deletions(-) diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index adb0ecd39846a..44b9d61cf0202 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -1489,10 +1489,13 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> { /// Call the closure with a local `TyCtxt` using the given arena. - pub fn enter_local(&self, - arena: &'tcx DroplessArena, - f: F) -> R - where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R + pub fn enter_local( + &self, + arena: &'tcx DroplessArena, + f: F + ) -> R + where + F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R { let interners = CtxtInterners::new(arena); let tcx = TyCtxt { @@ -1665,12 +1668,23 @@ pub mod tls { use rustc_data_structures::OnDrop; use rustc_data_structures::sync::Lrc; + /// This is the implicit state of rustc. It contains the current + /// TyCtxt and query. It is updated when creating a local interner or + /// executing a new query. Whenever there's a TyCtxt value available + /// you should also have access to an ImplicitCtxt through the functions + /// in this module. #[derive(Clone)] pub struct ImplicitCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + /// The current TyCtxt. Initially created by `enter_global` and updated + /// by `enter_local` with a new local interner pub tcx: TyCtxt<'a, 'gcx, 'tcx>, + + /// The current query job, if any. This is updated by start_job in + /// ty::maps::plumbing when executing a query pub query: Option>>, } + // A thread local value which stores a pointer to the current ImplicitCtxt thread_local!(static TLV: Cell = Cell::new(0)); fn set_tlv R, R>(value: usize, f: F) -> R { @@ -1684,12 +1698,17 @@ pub mod tls { TLV.with(|tlv| tlv.get()) } + /// This is a callback from libsyntax as it cannot access the implicit state + /// in librustc otherwise fn span_debug(span: syntax_pos::Span, f: &mut fmt::Formatter) -> fmt::Result { with(|tcx| { write!(f, "{}", tcx.sess.codemap().span_to_string(span)) }) } + /// This is a callback from libsyntax as it cannot access the implicit state + /// in librustc otherwise. It is used to when diagnostic messages are + /// emitted and stores them in the current query, if there is one. 
fn track_diagnostic(diagnostic: &Diagnostic) { with_context(|context| { if let Some(ref query) = context.query { @@ -1698,6 +1717,7 @@ pub mod tls { }) } + /// Sets up the callbacks from libsyntax on the current thread pub fn with_thread_locals(f: F) -> R where F: FnOnce() -> R { @@ -1722,6 +1742,20 @@ pub mod tls { }) } + /// Sets `context` as the new current ImplicitCtxt for the duration of the function `f` + pub fn enter_context<'a, 'gcx: 'tcx, 'tcx, F, R>(context: &ImplicitCtxt<'a, 'gcx, 'tcx>, + f: F) -> R + where F: FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R + { + set_tlv(context as *const _ as usize, || { + f(&context) + }) + } + + /// Enters GlobalCtxt by setting up libsyntax callbacks and + /// creating a initial TyCtxt and ImplicitCtxt. + /// This happens once per rustc session and TyCtxts only exists + /// inside the `f` function. pub fn enter_global<'gcx, F, R>(gcx: &GlobalCtxt<'gcx>, f: F) -> R where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'gcx>) -> R { @@ -1740,15 +1774,7 @@ pub mod tls { }) } - pub fn enter_context<'a, 'gcx: 'tcx, 'tcx, F, R>(context: &ImplicitCtxt<'a, 'gcx, 'tcx>, - f: F) -> R - where F: FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R - { - set_tlv(context as *const _ as usize, || { - f(&context) - }) - } - + /// Allows access to the current ImplicitCtxt in a closure if one is available pub fn with_context_opt(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'gcx, 'tcx>>) -> R { @@ -1760,46 +1786,62 @@ pub mod tls { } } - pub fn with_fully_related_context<'a, 'gcx, 'tcx, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx>, f: F) -> R - where F: for<'b> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx>) -> R + /// Allows access to the current ImplicitCtxt. + /// Panics if there is no ImplicitCtxt available + pub fn with_context(f: F) -> R + where F: for<'a, 'gcx, 'tcx> FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R + { + with_context_opt(|opt_context| f(opt_context.expect("no ImplicitCtxt stored in tls"))) + } + + /// Allows access to the current ImplicitCtxt whose tcx field has the same global + /// interner as the tcx argument passed in. This means the closure is given an ImplicitCtxt + /// with the same 'gcx lifetime as the TyCtxt passed in. + /// This will panic if you pass it a TyCtxt which has a different global interner from + /// the current ImplicitCtxt's tcx field. + pub fn with_related_context<'a, 'gcx, 'tcx1, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx1>, f: F) -> R + where F: for<'b, 'tcx2> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx2>) -> R { with_context(|context| { unsafe { let gcx = tcx.gcx as *const _ as usize; - let interners = tcx.interners as *const _ as usize; assert!(context.tcx.gcx as *const _ as usize == gcx); - assert!(context.tcx.interners as *const _ as usize == interners); let context: &ImplicitCtxt = mem::transmute(context); f(context) } }) } - pub fn with_related_context<'a, 'gcx, 'tcx1, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx1>, f: F) -> R - where F: for<'b, 'tcx2> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx2>) -> R + /// Allows access to the current ImplicitCtxt whose tcx field has the same global + /// interner and local interner as the tcx argument passed in. This means the closure + /// is given an ImplicitCtxt with the same 'tcx and 'gcx lifetimes as the TyCtxt passed in. + /// This will panic if you pass it a TyCtxt which has a different global interner or + /// a different local interner from the current ImplicitCtxt's tcx field. 
+ pub fn with_fully_related_context<'a, 'gcx, 'tcx, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx>, f: F) -> R + where F: for<'b> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx>) -> R { with_context(|context| { unsafe { let gcx = tcx.gcx as *const _ as usize; + let interners = tcx.interners as *const _ as usize; assert!(context.tcx.gcx as *const _ as usize == gcx); + assert!(context.tcx.interners as *const _ as usize == interners); let context: &ImplicitCtxt = mem::transmute(context); f(context) } }) } - pub fn with_context(f: F) -> R - where F: for<'a, 'gcx, 'tcx> FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R - { - with_context_opt(|opt_context| f(opt_context.expect("no ImplicitCtxt stored in tls"))) - } - + /// Allows access to the TyCtxt in the current ImplicitCtxt. + /// Panics if there is no ImplicitCtxt available pub fn with(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R { with_context(|context| f(context.tcx)) } + /// Allows access to the TyCtxt in the current ImplicitCtxt. + /// The closure is passed None if there is no ImplicitCtxt available pub fn with_opt(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(Option>) -> R { diff --git a/src/librustc/ty/maps/job.rs b/src/librustc/ty/maps/job.rs index 2ed993bd975e5..7d756fb16a453 100644 --- a/src/librustc/ty/maps/job.rs +++ b/src/librustc/ty/maps/job.rs @@ -8,65 +8,69 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![allow(warnings)] - -use std::mem; -use std::sync::atomic::AtomicBool; -use std::sync::atomic::Ordering; -use rustc_data_structures::sync::{Lock, LockGuard, Lrc}; +use rustc_data_structures::sync::{Lock, Lrc}; use syntax_pos::Span; use ty::tls; use ty::maps::Query; use ty::maps::plumbing::CycleError; use ty::context::TyCtxt; use errors::Diagnostic; -use std::process; -use std::fmt; -use std::sync::{Arc, Mutex}; -use std::collections::HashSet; -pub struct PoisonedJob; +/// Indicates the state of a query for a given key in a query map +pub(super) enum QueryResult<'tcx, T> { + /// An already executing query. The query job can be used to await for its completion + Started(Lrc>), + + /// The query is complete and produced `T` + Complete(T), + /// The query panicked. Queries trying to wait on this will raise a fatal error / silently panic + Poisoned, +} + +/// A span and a query key #[derive(Clone, Debug)] -pub struct StackEntry<'tcx> { +pub struct QueryInfo<'tcx> { pub span: Span, pub query: Query<'tcx>, } +/// A object representing an active query job. pub struct QueryJob<'tcx> { - pub entry: StackEntry<'tcx>, + pub info: QueryInfo<'tcx>, + + /// The parent query job which created this job and is implicitly waiting on it. pub parent: Option>>, - pub track_diagnostics: bool, + + /// Diagnostic messages which are emitted while the query executes pub diagnostics: Lock>, } impl<'tcx> QueryJob<'tcx> { - pub fn new( - entry: StackEntry<'tcx>, - track_diagnostics: bool, - parent: Option>>, - ) -> Self { + /// Creates a new query job + pub fn new(info: QueryInfo<'tcx>, parent: Option>>) -> Self { QueryJob { - track_diagnostics, diagnostics: Lock::new(Vec::new()), - entry, + info, parent, } } + /// Awaits for the query job to complete. + /// + /// For single threaded rustc there's no concurrent jobs running, so if we are waiting for any + /// query that means that there is a query cycle, thus this always running a cycle error. 
pub(super) fn await<'lcx>( &self, tcx: TyCtxt<'_, 'tcx, 'lcx>, span: Span, ) -> Result<(), CycleError<'tcx>> { - // The query is already executing, so this must be a cycle for single threaded rustc, - // so we find the cycle and return it - + // Get the current executing query (waiter) and find the waitee amongst its parents let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone()); let mut cycle = Vec::new(); while let Some(job) = current_job { - cycle.insert(0, job.entry.clone()); + cycle.insert(0, job.info.clone()); if &*job as *const _ == self as *const _ { break; @@ -78,14 +82,9 @@ impl<'tcx> QueryJob<'tcx> { Err(CycleError { span, cycle }) } - pub fn signal_complete(&self) { - // Signals to waiters that the query is complete. - // This is a no-op for single threaded rustc - } -} - -pub(super) enum QueryResult<'tcx, T> { - Started(Lrc>), - Complete(T), - Poisoned, + /// Signals to waiters that the query is complete. + /// + /// This does nothing for single threaded rustc, + /// as there are no concurrent jobs which could be waiting on us + pub fn signal_complete(&self) {} } diff --git a/src/librustc/ty/maps/mod.rs b/src/librustc/ty/maps/mod.rs index 6dd31baa5931b..f4977be78776b 100644 --- a/src/librustc/ty/maps/mod.rs +++ b/src/librustc/ty/maps/mod.rs @@ -67,7 +67,7 @@ use self::plumbing::*; pub use self::plumbing::force_from_dep_node; mod job; -pub use self::job::{QueryJob, StackEntry, PoisonedJob}; +pub use self::job::{QueryJob, QueryInfo}; use self::job::QueryResult; mod keys; diff --git a/src/librustc/ty/maps/plumbing.rs b/src/librustc/ty/maps/plumbing.rs index 895bf3d797336..c21b53cd427c0 100644 --- a/src/librustc/ty/maps/plumbing.rs +++ b/src/librustc/ty/maps/plumbing.rs @@ -16,7 +16,7 @@ use dep_graph::{DepNodeIndex, DepNode, DepKind, DepNodeColor}; use errors::DiagnosticBuilder; use ty::{TyCtxt}; use ty::maps::config::QueryDescription; -use ty::maps::job::{QueryResult, StackEntry}; +use ty::maps::job::{QueryResult, QueryInfo}; use ty::item_path; use rustc_data_structures::fx::{FxHashMap}; @@ -62,7 +62,18 @@ pub(super) trait GetCacheInternal<'tcx>: QueryDescription<'tcx> + Sized { #[derive(Clone)] pub(super) struct CycleError<'tcx> { pub(super) span: Span, - pub(super) cycle: Vec>, + pub(super) cycle: Vec>, +} + +/// The result of `try_get_lock` +pub(super) enum TryGetLock<'a, 'tcx: 'a, T, D: QueryDescription<'tcx> + 'a> { + /// The query is not yet started. Contains a guard to the map eventually used to start it. + NotYetStarted(LockGuard<'a, QueryMap<'tcx, D>>), + + /// The query was already completed. + /// Returns the result of the query and its dep node index + /// if it succeeded or a cycle error if it failed + JobCompleted(Result<(T, DepNodeIndex), CycleError<'tcx>>), } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { @@ -85,7 +96,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { err.span_note(self.sess.codemap().def_span(stack[0].span), &format!("the cycle begins when {}...", stack[0].query.describe(self))); - for &StackEntry { span, ref query, .. } in &stack[1..] { + for &QueryInfo { span, ref query, .. } in &stack[1..] { err.span_note(self.sess.codemap().def_span(span), &format!("...which then requires {}...", query.describe(self))); } @@ -252,11 +263,14 @@ macro_rules! 
define_maps { DepNode::new(tcx, $node(*key)) } - fn try_get_lock(tcx: TyCtxt<'a, $tcx, 'lcx>, - mut span: Span, - key: &$K) - -> Result>, - Result<($V, DepNodeIndex), CycleError<$tcx>>> + /// Either get the lock of the query map, allowing us to + /// start executing the query, or it returns with the result of the query. + /// If the query already executed and panicked, this will fatal error / silently panic + fn try_get_lock( + tcx: TyCtxt<'a, $tcx, 'lcx>, + mut span: Span, + key: &$K + ) -> TryGetLock<'a, $tcx, $V, Self> { loop { let lock = tcx.maps.$name.borrow_mut(); @@ -265,7 +279,8 @@ macro_rules! define_maps { QueryResult::Started(ref job) => Some(job.clone()), QueryResult::Complete(ref value) => { profq_msg!(tcx, ProfileQueriesMsg::CacheHit); - return Err(Ok(((&value.value).clone(), value.index))); + let result = Ok(((&value.value).clone(), value.index)); + return TryGetLock::JobCompleted(result); }, QueryResult::Poisoned => FatalError.raise(), } @@ -275,16 +290,19 @@ macro_rules! define_maps { let job = if let Some(job) = job { job } else { - return Ok(lock); + return TryGetLock::NotYetStarted(lock); }; mem::drop(lock); + // This just matches the behavior of `try_get_with` so the span when + // we await matches the span we would use when executing. + // See the FIXME there. if span == DUMMY_SP && stringify!($name) != "def_span" { span = key.default_span(tcx); } if let Err(cycle) = job.await(tcx, span) { - return Err(Err(cycle)); + return TryGetLock::JobCompleted(Err(cycle)); } } } @@ -306,21 +324,23 @@ macro_rules! define_maps { ) ); - macro_rules! get_lock { + /// Get the lock used to start the query or + /// return the result of the completed query + macro_rules! get_lock_or_return { () => {{ match Self::try_get_lock(tcx, span, &key) { - Ok(lock) => lock, - Err(result) => { + TryGetLock::NotYetStarted(lock) => lock, + TryGetLock::JobCompleted(result) => { return result.map(|(v, index)| { tcx.dep_graph.read_index(index); v - }); - }, + }) + } } }} } - let mut lock = get_lock!(); + let mut lock = get_lock_or_return!(); // FIXME(eddyb) Get more valid Span's on queries. // def_span guard is necessary to prevent a recursive loop, @@ -331,7 +351,7 @@ macro_rules! define_maps { // So we drop the lock here and reacquire it mem::drop(lock); span = key.default_span(tcx); - lock = get_lock!(); + lock = get_lock_or_return!(); } // Fast path for when incr. comp. is off. `to_dep_node` is @@ -385,7 +405,7 @@ macro_rules! define_maps { dep_node_index, &dep_node) } - lock = get_lock!(); + lock = get_lock_or_return!(); } match Self::force_with_lock(tcx, key, span, lock, dep_node) { @@ -421,6 +441,9 @@ macro_rules! define_maps { } } + /// Creates a job for the query and updates the query map indicating that it started. + /// Then it changes ImplicitCtxt to point to the new query job while it executes. + /// If the query panics, this updates the query map to indicate so. fn start_job(tcx: TyCtxt<'_, $tcx, 'lcx>, span: Span, key: $K, @@ -431,21 +454,25 @@ macro_rules! 
define_maps { { let query = Query::$name(Clone::clone(&key)); - let entry = StackEntry { + let entry = QueryInfo { span, query, }; + // The TyCtxt stored in TLS has the same global interner lifetime + // as `tcx`, so we use `with_related_context` to relate the 'gcx lifetimes + // when accessing the ImplicitCtxt let (r, job) = ty::tls::with_related_context(tcx, move |icx| { - let job = Lrc::new(QueryJob::new(entry, true, icx.query.clone())); + let job = Lrc::new(QueryJob::new(entry, icx.query.clone())); + // Store the job in the query map and drop the lock to allow + // others to wait it map.map.entry(key).or_insert(QueryResult::Started(job.clone())); - mem::drop(map); let r = { let on_drop = OnDrop(|| { - // Poison the query so jobs waiting on it panics + // Poison the query so jobs waiting on it panic tcx.maps .$name .borrow_mut() @@ -456,11 +483,13 @@ macro_rules! define_maps { job.signal_complete(); }); + // Update the ImplicitCtxt to point to our new query job let icx = ty::tls::ImplicitCtxt { tcx, query: Some(job.clone()), }; + // Use the ImplicitCtxt while we execute the query let r = ty::tls::enter_context(&icx, |icx| { compute(icx.tcx) }); @@ -473,6 +502,7 @@ macro_rules! define_maps { (r, job) }); + // Extract the diagnostic from the job let diagnostics: Vec<_> = mem::replace(&mut *job.diagnostics.lock(), Vec::new()); Ok(((r, diagnostics), job)) @@ -590,8 +620,8 @@ macro_rules! define_maps { // We may be concurrently trying both execute and force a query // Ensure that only one of them runs the query let lock = match Self::try_get_lock(tcx, span, &key) { - Ok(lock) => lock, - Err(result) => return result, + TryGetLock::NotYetStarted(lock) => lock, + TryGetLock::JobCompleted(result) => return result, }; Self::force_with_lock(tcx, key,
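
The central change in this series — replacing the per-TyCtxt query stack with parent-linked query jobs, and reporting a cycle by walking that parent chain (see `QueryJob::await` above) — can be illustrated with a small self-contained sketch. The types below are simplified stand-ins, not the compiler's code: plain `Rc` replaces rustc's `Lrc`, a `&'static str` stands in for `Query<'tcx>` plus its `Span`, and the query names are made up.

    use std::rc::Rc;

    /// Simplified stand-in for `QueryInfo<'tcx>` (a span plus a query key).
    #[derive(Clone, Debug)]
    struct QueryInfo {
        query: &'static str,
    }

    /// Simplified stand-in for `QueryJob<'tcx>`: a job points at the job that
    /// spawned it instead of living on a global stack.
    struct QueryJob {
        info: QueryInfo,
        parent: Option<Rc<QueryJob>>,
    }

    /// Simplified stand-in for `CycleError<'tcx>`.
    #[derive(Debug)]
    struct CycleError {
        cycle: Vec<QueryInfo>,
    }

    impl QueryJob {
        /// Mirrors the single-threaded `await` above: starting from the job
        /// that is currently executing, walk up the parent chain; everything
        /// collected before reaching `self` is part of the cycle.
        fn find_cycle(&self, current: Option<Rc<QueryJob>>) -> CycleError {
            let mut cycle = Vec::new();
            let mut cursor = current;
            while let Some(job) = cursor {
                cycle.insert(0, job.info.clone());
                if std::ptr::eq(&*job, self) {
                    break;
                }
                cursor = job.parent.clone();
            }
            CycleError { cycle }
        }
    }

    fn main() {
        // Hypothetical cycle: type_of(A) starts layout_of(A), which asks for
        // type_of(A) again and so must wait on the job that is still running.
        let type_of = Rc::new(QueryJob {
            info: QueryInfo { query: "type_of(A)" },
            parent: None,
        });
        let layout_of = Rc::new(QueryJob {
            info: QueryInfo { query: "layout_of(A)" },
            parent: Some(type_of.clone()),
        });

        // `layout_of` is the currently executing job when the second
        // `type_of(A)` request hits the already-started entry in the map.
        let err = type_of.find_cycle(Some(layout_of));
        let names: Vec<_> = err.cycle.iter().map(|info| info.query).collect();
        assert_eq!(names, ["type_of(A)", "layout_of(A)"]);
        println!("cycle: {:?}", names);
    }

Keeping the waiter relationship inside the jobs themselves, rather than in a shared `Vec` on the query maps, is what the commit message is pointing at: a waiting query only needs a pointer to the job it blocks on, which is the shape the later multi-threaded implementation can build on.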