From 4f975b0f1366643a2a1fb2f9c75d5f45c73da7da Mon Sep 17 00:00:00 2001 From: Valentin Churavy Date: Sun, 19 Nov 2023 16:17:48 -0500 Subject: [PATCH 1/3] Tag CodeInstance for external abstract interpreter External abstract interpreters need to be able to take part in invalidation, as well as cross-session inference result caching. MethodInstance.cache contains a set of CodeInstances which hold the inference cache results. Instead of having external abstract interpreters create detached CodeInstances we attach them to the MethodInstance they were created from. This solves the invalidation problem and the cross-session cache problem in one go, but requires that we tag/mark the CodeInstance with a token to remember which abstract interpreter created them. `nothing` is reserved for the native interpreter and all other interpreters can use values that compare with `jl_egal`. To perform a cache-lookup we need the current abstract interpreter token. Co-authored-by: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> --- base/boot.jl | 6 +- base/compiler/cicache.jl | 11 +- base/compiler/typeinfer.jl | 2 +- base/compiler/types.jl | 2 + base/compiler/utilities.jl | 19 --- base/reflection.jl | 9 ++ doc/src/devdocs/ast.md | 4 + doc/src/devdocs/locks.md | 1 + src/builtins.c | 1 + src/gf.c | 96 +++++------- src/jltypes.c | 28 ++-- src/julia.h | 4 +- src/julia_internal.h | 7 +- src/method.c | 1 - src/opaque_closure.c | 2 +- src/staticdata.c | 7 +- src/staticdata_utils.c | 50 ++++--- stdlib/REPL/src/REPLCompletions.jl | 34 +---- test/compiler/AbstractInterpreter.jl | 5 +- test/compiler/EscapeAnalysis/EAUtils.jl | 33 +--- test/compiler/invalidation.jl | 191 ++++++++++++++++-------- test/compiler/newinterp.jl | 15 +- test/core.jl | 2 +- test/precompile.jl | 72 ++++++++- test/reflection.jl | 3 +- 25 files changed, 345 insertions(+), 260 deletions(-) diff --git a/base/boot.jl b/base/boot.jl index ed26536b961ff..8ca6d392ead67 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ 
-483,13 +483,13 @@ eval(Core, quote end) function CodeInstance( - mi::MethodInstance, @nospecialize(rettype), @nospecialize(exctype), @nospecialize(inferred_const), + mi::MethodInstance, owner, @nospecialize(rettype), @nospecialize(exctype), @nospecialize(inferred_const), @nospecialize(inferred), const_flags::Int32, min_world::UInt, max_world::UInt, ipo_effects::UInt32, effects::UInt32, @nospecialize(analysis_results), relocatability::UInt8) return ccall(:jl_new_codeinst, Ref{CodeInstance}, - (Any, Any, Any, Any, Any, Int32, UInt, UInt, UInt32, UInt32, Any, UInt8), - mi, rettype, exctype, inferred_const, inferred, const_flags, min_world, max_world, + (Any, Any, Any, Any, Any, Any, Int32, UInt, UInt, UInt32, UInt32, Any, UInt8), + mi, owner, rettype, exctype, inferred_const, inferred, const_flags, min_world, max_world, ipo_effects, effects, analysis_results, relocatability) end diff --git a/base/compiler/cicache.jl b/base/compiler/cicache.jl index 8332777e6d5bc..fbd9530e74f7b 100644 --- a/base/compiler/cicache.jl +++ b/base/compiler/cicache.jl @@ -7,14 +7,17 @@ Internally, each `MethodInstance` keep a unique global cache of code instances that have been created for the given method instance, stratified by world age ranges. This struct abstracts over access to this cache. """ -struct InternalCodeCache end +struct InternalCodeCache + owner::Any # `jl_egal` is used for comparison +end function setindex!(cache::InternalCodeCache, ci::CodeInstance, mi::MethodInstance) + @assert ci.owner === cache.owner ccall(:jl_mi_cache_insert, Cvoid, (Any, Any), mi, ci) return cache end -const GLOBAL_CI_CACHE = InternalCodeCache() +const GLOBAL_CI_CACHE = InternalCodeCache(nothing) struct WorldRange min_world::UInt @@ -49,11 +52,11 @@ WorldView(wvc::WorldView, wr::WorldRange) = WorldView(wvc.cache, wr) WorldView(wvc::WorldView, args...) = WorldView(wvc.cache, args...) 
function haskey(wvc::WorldView{InternalCodeCache}, mi::MethodInstance) - return ccall(:jl_rettype_inferred, Any, (Any, UInt, UInt), mi, first(wvc.worlds), last(wvc.worlds)) !== nothing + return ccall(:jl_rettype_inferred, Any, (Any, Any, UInt, UInt), wvc.cache.owner, mi, first(wvc.worlds), last(wvc.worlds)) !== nothing end function get(wvc::WorldView{InternalCodeCache}, mi::MethodInstance, default) - r = ccall(:jl_rettype_inferred, Any, (Any, UInt, UInt), mi, first(wvc.worlds), last(wvc.worlds)) + r = ccall(:jl_rettype_inferred, Any, (Any, Any, UInt, UInt), wvc.cache.owner, mi, first(wvc.worlds), last(wvc.worlds)) if r === nothing return default end diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl index e9edd04e59d40..e30c6e5f96fcb 100644 --- a/base/compiler/typeinfer.jl +++ b/base/compiler/typeinfer.jl @@ -326,7 +326,7 @@ function CodeInstance(interp::AbstractInterpreter, result::InferenceResult, end end # relocatability = isa(inferred_result, String) ? inferred_result[end] : UInt8(0) - return CodeInstance(result.linfo, + return CodeInstance(result.linfo, cache_owner(interp), widenconst(result_type), widenconst(result.exc_result), rettype_const, inferred_result, const_flags, first(valid_worlds), last(valid_worlds), # TODO: Actually do something with non-IPO effects diff --git a/base/compiler/types.jl b/base/compiler/types.jl index cdf90a3a6e78c..fe6e21621f8fd 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -14,6 +14,7 @@ the following methods to satisfy the `AbstractInterpreter` API requirement: - `OptimizationParams(interp::NewInterpreter)` - return an `OptimizationParams` instance - `get_inference_world(interp::NewInterpreter)` - return the world age for this interpreter - `get_inference_cache(interp::NewInterpreter)` - return the local inference cache +- `cache_owner(interp::NewInterpreter)` - return the owner of any new cache entries - `code_cache(interp::NewInterpreter)` - return the global inference cache """ 
:(AbstractInterpreter) @@ -405,6 +406,7 @@ OptimizationParams(interp::NativeInterpreter) = interp.opt_params get_inference_world(interp::NativeInterpreter) = interp.world get_inference_cache(interp::NativeInterpreter) = interp.inf_cache code_cache(interp::NativeInterpreter) = WorldView(GLOBAL_CI_CACHE, get_inference_world(interp)) +cache_owner(interp::NativeInterpreter) = nothing """ already_inferred_quick_test(::AbstractInterpreter, ::MethodInstance) diff --git a/base/compiler/utilities.jl b/base/compiler/utilities.jl index 368395e714054..ef2f37d8e9ce2 100644 --- a/base/compiler/utilities.jl +++ b/base/compiler/utilities.jl @@ -321,25 +321,6 @@ function iterate(iter::BackedgeIterator, i::Int=1) return BackedgePair(item, backedges[i+1]::MethodInstance), i+2 # `invoke` calls end -""" - add_invalidation_callback!(callback, mi::MethodInstance) - -Register `callback` to be triggered upon the invalidation of `mi`. -`callback` should a function taking two arguments, `callback(replaced::MethodInstance, max_world::UInt32)`, -and it will be recursively invoked on `MethodInstance`s within the invalidation graph. 
-""" -function add_invalidation_callback!(@nospecialize(callback), mi::MethodInstance) - if !isdefined(mi, :callbacks) - callbacks = mi.callbacks = Any[callback] - else - callbacks = mi.callbacks::Vector{Any} - if !any(@nospecialize(cb)->cb===callback, callbacks) - push!(callbacks, callback) - end - end - return callbacks -end - ######### # types # ######### diff --git a/base/reflection.jl b/base/reflection.jl index 8c3ec226d6e22..d6adcea6100ae 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -1332,6 +1332,15 @@ function method_instances(@nospecialize(f), @nospecialize(t), world::UInt) return results end +function method_instance(@nospecialize(f), @nospecialize(t); + world=Base.get_world_counter(), method_table=nothing) + tt = signature_type(f, t) + mi = ccall(:jl_method_lookup_by_tt, Any, + (Any, Csize_t, Any), + tt, world, method_table) + return mi::Union{Nothing, MethodInstance} +end + default_debug_info_kind() = unsafe_load(cglobal(:jl_default_debug_info_kind, Cint)) # this type mirrors jl_cgparams_t (documented in julia.h) diff --git a/doc/src/devdocs/ast.md b/doc/src/devdocs/ast.md index f829b27663e62..c2cd0e92d58d2 100644 --- a/doc/src/devdocs/ast.md +++ b/doc/src/devdocs/ast.md @@ -627,6 +627,10 @@ for important details on how to modify these fields safely. The `MethodInstance` that this cache entry is derived from. + * `owner` + + A token that represents the owner of this `CodeInstance`. Will use `jl_egal` to match. 
+ * `rettype`/`rettype_const` diff --git a/doc/src/devdocs/locks.md b/doc/src/devdocs/locks.md index f79f4f2b1e3e3..50cdd738e3b34 100644 --- a/doc/src/devdocs/locks.md +++ b/doc/src/devdocs/locks.md @@ -155,6 +155,7 @@ MethodInstance/CodeInstance updates : Method->writelock, codegen lock > * specTypes > * sparam_vals > * def +> * owner > * These are set by `jl_type_infer` (while holding codegen lock): > * cache diff --git a/src/builtins.c b/src/builtins.c index 412ccaf8bab04..29aec53ae5a40 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -2378,6 +2378,7 @@ jl_fptr_args_t jl_get_builtin_fptr(jl_datatype_t *dt) jl_typemap_entry_t *entry = (jl_typemap_entry_t*)jl_atomic_load_relaxed(&dt->name->mt->defs); jl_method_instance_t *mi = jl_atomic_load_relaxed(&entry->func.method->unspecialized); jl_code_instance_t *ci = jl_atomic_load_relaxed(&mi->cache); + assert(ci->owner == jl_nothing); return jl_atomic_load_relaxed(&ci->specptr.fptr1); } diff --git a/src/gf.c b/src/gf.c index e8a5f7450b724..00613cf9da486 100644 --- a/src/gf.c +++ b/src/gf.c @@ -316,7 +316,7 @@ jl_datatype_t *jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_a jl_atomic_store_relaxed(&m->unspecialized, mi); jl_gc_wb(m, mi); - jl_code_instance_t *codeinst = jl_new_codeinst(mi, + jl_code_instance_t *codeinst = jl_new_codeinst(mi, jl_nothing, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, jl_nothing, jl_nothing, 0, 1, ~(size_t)0, 0, 0, jl_nothing, 0); jl_mi_cache_insert(mi, codeinst); @@ -437,11 +437,13 @@ JL_DLLEXPORT jl_value_t *jl_call_in_typeinf_world(jl_value_t **args, int nargs) return ret; } -JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT +STATIC_INLINE jl_value_t *_jl_rettype_inferred(jl_value_t *owner, jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT { jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache); while (codeinst) { - if 
(jl_atomic_load_relaxed(&codeinst->min_world) <= min_world && max_world <= jl_atomic_load_relaxed(&codeinst->max_world)) { + if (jl_atomic_load_relaxed(&codeinst->min_world) <= min_world && + max_world <= jl_atomic_load_relaxed(&codeinst->max_world) && + jl_egal(codeinst->owner, owner)) { jl_value_t *code = jl_atomic_load_relaxed(&codeinst->inferred); if (code && (code == jl_nothing || jl_ir_flag_inferred(code))) return (jl_value_t*)codeinst; @@ -450,24 +452,37 @@ JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_method_instance_t *mi, size_t mi } return (jl_value_t*)jl_nothing; } -JL_DLLEXPORT jl_value_t *(*const jl_rettype_inferred_addr)(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT = jl_rettype_inferred; + +JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_value_t *owner, jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT +{ + return (jl_value_t*)_jl_rettype_inferred(owner, mi, min_world, max_world); +} + +JL_DLLEXPORT jl_value_t *jl_rettype_inferred_native(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT +{ + return (jl_value_t*)_jl_rettype_inferred(jl_nothing, mi, min_world, max_world); +} + +JL_DLLEXPORT jl_value_t *(*const jl_rettype_inferred_addr)(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT = jl_rettype_inferred_native; JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred( jl_method_instance_t *mi JL_PROPAGATES_ROOT, jl_value_t *rettype, size_t min_world, size_t max_world) { + jl_value_t *owner = jl_nothing; // TODO: owner should be arg jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache); while (codeinst) { if (jl_atomic_load_relaxed(&codeinst->min_world) == min_world && jl_atomic_load_relaxed(&codeinst->max_world) == max_world && + jl_egal(codeinst->owner, owner) && jl_egal(codeinst->rettype, rettype)) { return codeinst; } codeinst = jl_atomic_load_relaxed(&codeinst->next); } codeinst = jl_new_codeinst( - mi, rettype, 
(jl_value_t*)jl_any_type, NULL, NULL, + mi, owner, rettype, (jl_value_t*)jl_any_type, NULL, NULL, 0, min_world, max_world, 0, 0, jl_nothing, 0); jl_mi_cache_insert(mi, codeinst); return codeinst; @@ -484,7 +499,8 @@ JL_DLLEXPORT jl_code_instance_t *jl_get_codeinst_for_src( } JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst( - jl_method_instance_t *mi, jl_value_t *rettype, jl_value_t *exctype, + jl_method_instance_t *mi, jl_value_t *owner, + jl_value_t *rettype, jl_value_t *exctype, jl_value_t *inferred_const, jl_value_t *inferred, int32_t const_flags, size_t min_world, size_t max_world, uint32_t ipo_effects, uint32_t effects, jl_value_t *analysis_results, @@ -496,6 +512,7 @@ JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst( jl_code_instance_t *codeinst = (jl_code_instance_t*)jl_gc_alloc(ct->ptls, sizeof(jl_code_instance_t), jl_code_instance_type); codeinst->def = mi; + codeinst->owner = owner; jl_atomic_store_relaxed(&codeinst->min_world, min_world); jl_atomic_store_relaxed(&codeinst->max_world, max_world); codeinst->rettype = rettype; @@ -548,7 +565,7 @@ static int get_method_unspec_list(jl_typemap_entry_t *def, void *closure) if (!jl_is_svec(specializations)) { jl_method_instance_t *mi = (jl_method_instance_t*)specializations; assert(jl_is_method_instance(mi)); - if (jl_rettype_inferred(mi, world, world) == jl_nothing) + if (jl_rettype_inferred_native(mi, world, world) == jl_nothing) jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)mi); return 1; } @@ -558,7 +575,7 @@ static int get_method_unspec_list(jl_typemap_entry_t *def, void *closure) jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, i); if ((jl_value_t*)mi != jl_nothing) { assert(jl_is_method_instance(mi)); - if (jl_rettype_inferred(mi, world, world) == jl_nothing) + if (jl_rettype_inferred_native(mi, world, world) == jl_nothing) jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)mi); } } @@ -678,7 +695,7 @@ JL_DLLEXPORT void jl_set_typeinf_func(jl_value_t *f) size_t 
i, l; for (i = 0, l = jl_array_nrows(unspec); i < l; i++) { jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(unspec, i); - if (jl_rettype_inferred(mi, world, world) == jl_nothing) + if (jl_rettype_inferred_native(mi, world, world) == jl_nothing) jl_type_infer(mi, world, 1); } JL_GC_POP(); @@ -1627,41 +1644,6 @@ JL_DLLEXPORT jl_value_t *jl_debug_method_invalidation(int state) return jl_nothing; } -// call external callbacks registered with this method_instance -static void invalidate_external(jl_method_instance_t *mi, size_t max_world) { - jl_array_t *callbacks = mi->callbacks; - if (callbacks) { - // AbstractInterpreter allows for MethodInstances to be present in non-local caches - // inform those caches about the invalidation. - JL_TRY { - size_t i, l = jl_array_nrows(callbacks); - jl_value_t **args; - JL_GC_PUSHARGS(args, 3); - // these arguments are constant per call - args[1] = (jl_value_t*)mi; - args[2] = jl_box_uint32(max_world); - - jl_task_t *ct = jl_current_task; - size_t last_age = ct->world_age; - ct->world_age = jl_get_world_counter(); - - jl_value_t **cbs = (jl_value_t**)jl_array_ptr_data(callbacks); - for (i = 0; i < l; i++) { - args[0] = cbs[i]; - jl_apply(args, 3); - } - ct->world_age = last_age; - JL_GC_POP(); - } - JL_CATCH { - jl_printf((JL_STREAM*)STDERR_FILENO, "error in invalidation callback: "); - jl_static_show((JL_STREAM*)STDERR_FILENO, jl_current_exception()); - jl_printf((JL_STREAM*)STDERR_FILENO, "\n"); - jlbacktrace(); // written to STDERR_FILENO - } - } -} - static void _invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_world, int depth); // recursively invalidate cached methods that had an edge to a replaced method @@ -1690,7 +1672,6 @@ static void invalidate_method_instance(jl_method_instance_t *replaced, size_t ma codeinst = jl_atomic_load_relaxed(&codeinst->next); } JL_GC_PUSH1(&replaced); - invalidate_external(replaced, max_world); // recurse to all backedges to update their valid range also 
_invalidate_backedges(replaced, max_world, depth + 1); JL_GC_POP(); @@ -1839,7 +1820,6 @@ static int invalidate_mt_cache(jl_typemap_entry_t *oldentry, void *closure0) } } if (intersects) { - // TODO call invalidate_external here? if (_jl_debug_method_invalidation) { jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)mi); jl_value_t *loctag = jl_cstr_to_string("invalidate_mt_cache"); @@ -1924,7 +1904,6 @@ static void jl_method_table_invalidate(jl_methtable_t *mt, jl_typemap_entry_t *m jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, i); if ((jl_value_t*)mi != jl_nothing) { invalidated = 1; - invalidate_external(mi, max_world); invalidate_backedges(mi, max_world, "jl_method_table_disable"); } } @@ -2202,7 +2181,6 @@ void jl_method_table_activate(jl_methtable_t *mt, jl_typemap_entry_t *newentry) jl_array_del_end(backedges, nb - insb); } jl_array_ptr_1d_push(oldmi, (jl_value_t*)mi); - invalidate_external(mi, max_world); if (_jl_debug_method_invalidation && invalidated) { jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)mi); loctag = jl_cstr_to_string("jl_method_table_insert"); @@ -2302,7 +2280,7 @@ static jl_tupletype_t *lookup_arg_type_tuple(jl_value_t *arg1 JL_PROPAGATES_ROOT return jl_lookup_arg_tuple_type(arg1, args, nargs, 1); } -JL_DLLEXPORT jl_method_instance_t *jl_method_lookup_by_tt(jl_tupletype_t *tt, size_t world, jl_value_t *_mt) +JL_DLLEXPORT jl_value_t *jl_method_lookup_by_tt(jl_tupletype_t *tt, size_t world, jl_value_t *_mt) { jl_methtable_t *mt = NULL; if (_mt == jl_nothing) @@ -2311,7 +2289,10 @@ JL_DLLEXPORT jl_method_instance_t *jl_method_lookup_by_tt(jl_tupletype_t *tt, si assert(jl_isa(_mt, (jl_value_t*)jl_methtable_type)); mt = (jl_methtable_t*) _mt; } - return jl_mt_assoc_by_type(mt, tt, world); + jl_method_instance_t* mi = jl_mt_assoc_by_type(mt, tt, world); + if (!mi) + return jl_nothing; + return (jl_value_t*) mi; } JL_DLLEXPORT jl_method_instance_t *jl_method_lookup(jl_value_t **args, 
size_t nargs, size_t world) @@ -2503,7 +2484,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t jl_code_instance_t *unspec = jl_atomic_load_relaxed(&unspecmi->cache); jl_callptr_t unspec_invoke = NULL; if (unspec && (unspec_invoke = jl_atomic_load_acquire(&unspec->invoke))) { - jl_code_instance_t *codeinst = jl_new_codeinst(mi, + jl_code_instance_t *codeinst = jl_new_codeinst(mi, jl_nothing, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, 0, 1, ~(size_t)0, 0, 0, jl_nothing, 0); void *unspec_fptr = jl_atomic_load_relaxed(&unspec->specptr.fptr); @@ -2530,7 +2511,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t compile_option == JL_OPTIONS_COMPILE_MIN) { jl_code_info_t *src = jl_code_for_interpreter(mi, world); if (!jl_code_requires_compiler(src, 0)) { - jl_code_instance_t *codeinst = jl_new_codeinst(mi, + jl_code_instance_t *codeinst = jl_new_codeinst(mi, jl_nothing, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, 0, 1, ~(size_t)0, 0, 0, jl_nothing, 0); jl_atomic_store_release(&codeinst->invoke, jl_fptr_interpret_call); @@ -2565,7 +2546,8 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t // only these care about the exact specTypes, otherwise we can use it directly return ucache; } - codeinst = jl_new_codeinst(mi, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, + codeinst = jl_new_codeinst(mi, jl_nothing, + (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, 0, 1, ~(size_t)0, 0, 0, jl_nothing, 0); void *unspec_fptr = jl_atomic_load_relaxed(&ucache->specptr.fptr); if (unspec_fptr) { @@ -2815,10 +2797,10 @@ static jl_method_instance_t *jl_get_compile_hint_specialization(jl_tupletype_t * static void _generate_from_hint(jl_method_instance_t *mi, size_t world) { - jl_value_t *codeinst = jl_rettype_inferred(mi, world, world); + jl_value_t *codeinst = jl_rettype_inferred_native(mi, world, world); if (codeinst == 
jl_nothing) { (void)jl_type_infer(mi, world, 1); - codeinst = jl_rettype_inferred(mi, world, world); + codeinst = jl_rettype_inferred_native(mi, world, world); } if (codeinst != jl_nothing) { if (jl_atomic_load_relaxed(&((jl_code_instance_t*)codeinst)->invoke) == jl_fptr_const_return) @@ -2857,10 +2839,10 @@ JL_DLLEXPORT void jl_compile_method_instance(jl_method_instance_t *mi, jl_tuplet jl_method_instance_t *mi2 = jl_specializations_get_linfo(mi->def.method, (jl_value_t*)types2, tpenv2); JL_GC_POP(); jl_atomic_store_relaxed(&mi2->precompiled, 1); - if (jl_rettype_inferred(mi2, world, world) == jl_nothing) + if (jl_rettype_inferred_native(mi2, world, world) == jl_nothing) (void)jl_type_infer(mi2, world, 1); if (jl_typeinf_func && jl_atomic_load_relaxed(&mi->def.method->primary_world) <= tworld) { - if (jl_rettype_inferred(mi2, tworld, tworld) == jl_nothing) + if (jl_rettype_inferred_native(mi2, tworld, tworld) == jl_nothing) (void)jl_type_infer(mi2, tworld, 1); } } diff --git a/src/jltypes.c b/src/jltypes.c index 84e90303affaa..6f9347c8fadd4 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -3265,25 +3265,23 @@ void jl_init_types(void) JL_GC_DISABLED jl_method_instance_type = jl_new_datatype(jl_symbol("MethodInstance"), core, jl_any_type, jl_emptysvec, - jl_perm_symsvec(10, + jl_perm_symsvec(9, "def", "specTypes", "sparam_vals", "uninferred", "backedges", - "callbacks", "cache", "inInference", "cache_with_orig", "precompiled"), - jl_svec(10, + jl_svec(9, jl_new_struct(jl_uniontype_type, jl_method_type, jl_module_type), jl_any_type, jl_simplevector_type, jl_any_type, jl_array_any_type, jl_any_type, - jl_any_type, jl_bool_type, jl_bool_type, jl_bool_type), @@ -3291,7 +3289,7 @@ void jl_init_types(void) JL_GC_DISABLED 0, 1, 3); // These fields should be constant, but Serialization wants to mutate them in initialization //const static uint32_t method_instance_constfields[1] = { 0x00000007 }; // (1<<0)|(1<<1)|(1<<2); - const static uint32_t 
method_instance_atomicfields[1] = { 0x00000248 }; // (1<<3)|(1<<6)|(1<<9); + const static uint32_t method_instance_atomicfields[1] = { 0x00000128 }; // (1<<3)|(1<<5)|(1<<8); //Fields 4 and 5 must be protected by method->write_lock, and thus all operations on jl_method_instance_t are threadsafe. TODO: except inInference //jl_method_instance_type->name->constfields = method_instance_constfields; jl_method_instance_type->name->atomicfields = method_instance_atomicfields; @@ -3299,8 +3297,9 @@ void jl_init_types(void) JL_GC_DISABLED jl_code_instance_type = jl_new_datatype(jl_symbol("CodeInstance"), core, jl_any_type, jl_emptysvec, - jl_perm_symsvec(16, + jl_perm_symsvec(17, "def", + "owner", "next", "min_world", "max_world", @@ -3314,9 +3313,10 @@ void jl_init_types(void) JL_GC_DISABLED "analysis_results", "isspecsig", "precompile", "relocatability", "invoke", "specptr"), // function object decls - jl_svec(16, + jl_svec(17, jl_method_instance_type, jl_any_type, + jl_any_type, jl_ulong_type, jl_ulong_type, jl_any_type, @@ -3333,11 +3333,11 @@ void jl_init_types(void) JL_GC_DISABLED jl_any_type, jl_any_type), // fptrs jl_emptysvec, 0, 1, 1); - jl_svecset(jl_code_instance_type->types, 1, jl_code_instance_type); - const static uint32_t code_instance_constfields[1] = { 0b0000010101110001 }; // Set fields 1, 5-7, 9, 11 as const - const static uint32_t code_instance_atomicfields[1] = { 0b1101001010001110 }; // Set fields 2-4, 8, 10, 13, 15-16 as atomic - //Fields 3-4 are only operated on by construction and deserialization, so are const at runtime - //Fields 11 and 15 must be protected by locks, and thus all operations on jl_code_instance_t are threadsafe + jl_svecset(jl_code_instance_type->types, 2, jl_code_instance_type); + const static uint32_t code_instance_constfields[1] = { 0b00000101011100011 }; // Set fields 1, 2, 6-8, 10, 12 as const + const static uint32_t code_instance_atomicfields[1] = { 0b11010010100011100 }; // Set fields 3-5, 9, 11, 14, 16-17 as atomic + 
//Fields 4-5 are only operated on by construction and deserialization, so are const at runtime + //Fields 12 and 16 must be protected by locks, and thus all operations on jl_code_instance_t are threadsafe jl_code_instance_type->name->constfields = code_instance_constfields; jl_code_instance_type->name->atomicfields = code_instance_atomicfields; @@ -3476,9 +3476,9 @@ void jl_init_types(void) JL_GC_DISABLED jl_svecset(jl_methtable_type->types, 9, jl_uint8_type); jl_svecset(jl_methtable_type->types, 10, jl_uint8_type); jl_svecset(jl_method_type->types, 12, jl_method_instance_type); - jl_svecset(jl_method_instance_type->types, 6, jl_code_instance_type); - jl_svecset(jl_code_instance_type->types, 14, jl_voidpointer_type); + jl_svecset(jl_method_instance_type->types, 5, jl_code_instance_type); jl_svecset(jl_code_instance_type->types, 15, jl_voidpointer_type); + jl_svecset(jl_code_instance_type->types, 16, jl_voidpointer_type); jl_svecset(jl_binding_type->types, 1, jl_globalref_type); jl_svecset(jl_binding_type->types, 2, jl_binding_type); diff --git a/src/julia.h b/src/julia.h index 7d143a3daa3fc..eb0fa55502b37 100644 --- a/src/julia.h +++ b/src/julia.h @@ -391,7 +391,6 @@ struct _jl_method_instance_t { jl_svec_t *sparam_vals; // static parameter values, indexed by def.method->sig _Atomic(jl_value_t*) uninferred; // cached uncompressed code, for generated functions, top-level thunks, or the interpreter jl_array_t *backedges; // list of method-instances which call this method-instance; `invoke` records (invokesig, caller) pairs - jl_array_t *callbacks; // list of callback functions to inform external caches about invalidations _Atomic(struct _jl_code_instance_t*) cache; uint8_t inInference; // flags to tell if inference is running on this object uint8_t cache_with_orig; // !cache_with_specTypes @@ -412,6 +411,7 @@ typedef struct _jl_opaque_closure_t { typedef struct _jl_code_instance_t { JL_DATA_TYPE jl_method_instance_t *def; // method this is specialized from + 
jl_value_t *owner; // Compiler token this belongs to, `jl_nothing` is reserved for native _Atomic(struct _jl_code_instance_t*) next; // pointer to the next cache entry // world range for which this object is valid to use @@ -2551,7 +2551,7 @@ typedef struct { int gcstack_arg; // Pass the ptls value as an argument with swiftself int use_jlplt; // Whether to use the Julia PLT mechanism or emit symbols directly - // Cache access. Default: jl_rettype_inferred. + // Cache access. Default: jl_rettype_inferred_native. jl_codeinstance_lookup_t lookup; } jl_cgparams_t; extern JL_DLLEXPORT int jl_default_debug_info_kind; diff --git a/src/julia_internal.h b/src/julia_internal.h index e140dfa205d8e..14b8b4f43ee92 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -669,7 +669,8 @@ jl_method_instance_t *jl_get_unspecialized_from_mi(jl_method_instance_t *method jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROPAGATES_ROOT); JL_DLLEXPORT jl_code_instance_t* jl_new_codeinst( - jl_method_instance_t *mi, jl_value_t *rettype, jl_value_t *exctype, + jl_method_instance_t *mi, jl_value_t *owner, + jl_value_t *rettype, jl_value_t *exctype, jl_value_t *inferred_const, jl_value_t *inferred, int32_t const_flags, size_t min_world, size_t max_world, uint32_t ipo_effects, uint32_t effects, jl_value_t *analysis_results, @@ -809,7 +810,7 @@ JL_DLLEXPORT int jl_is_toplevel_only_expr(jl_value_t *e) JL_NOTSAFEPOINT; jl_value_t *jl_call_scm_on_ast_and_loc(const char *funcname, jl_value_t *expr, jl_module_t *inmodule, const char *file, int line); -JL_DLLEXPORT jl_method_instance_t *jl_method_lookup_by_tt(jl_tupletype_t *tt, size_t world, jl_value_t *_mt); +JL_DLLEXPORT jl_value_t *jl_method_lookup_by_tt(jl_tupletype_t *tt, size_t world, jl_value_t *_mt); JL_DLLEXPORT jl_method_instance_t *jl_method_lookup(jl_value_t **args, size_t nargs, size_t world); jl_value_t *jl_gf_invoke_by_method(jl_method_t *method, jl_value_t *gf, jl_value_t **args, size_t nargs); @@ -1021,7 
+1022,7 @@ JL_DLLEXPORT jl_method_t *jl_new_method_uninit(jl_module_t*); JL_DLLEXPORT jl_methtable_t *jl_new_method_table(jl_sym_t *name, jl_module_t *module); JL_DLLEXPORT jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types, size_t world, size_t *min_valid, size_t *max_valid, int mt_cache); jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_value_t *types, jl_svec_t *sp); -JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_method_instance_t *li JL_PROPAGATES_ROOT, size_t min_world, size_t max_world); +JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_value_t *owner, jl_method_instance_t *li JL_PROPAGATES_ROOT, size_t min_world, size_t max_world); JL_DLLEXPORT jl_code_instance_t *jl_method_compiled(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt JL_PROPAGATES_ROOT, jl_value_t *type, size_t world); JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo( diff --git a/src/method.c b/src/method.c index 3d3cc4cb7ea4e..88d43fb4b22ee 100644 --- a/src/method.c +++ b/src/method.c @@ -488,7 +488,6 @@ JL_DLLEXPORT jl_method_instance_t *jl_new_method_instance_uninit(void) mi->sparam_vals = jl_emptysvec; jl_atomic_store_relaxed(&mi->uninferred, NULL); mi->backedges = NULL; - mi->callbacks = NULL; jl_atomic_store_relaxed(&mi->cache, NULL); mi->inInference = 0; mi->cache_with_orig = 0; diff --git a/src/opaque_closure.c b/src/opaque_closure.c index 0b0d1052bd449..45b08fee668db 100644 --- a/src/opaque_closure.c +++ b/src/opaque_closure.c @@ -153,7 +153,7 @@ JL_DLLEXPORT jl_opaque_closure_t *jl_new_opaque_closure_from_code_info(jl_tuplet sigtype = jl_argtype_with_function(env, (jl_value_t*)argt); jl_method_instance_t *mi = jl_specializations_get_linfo((jl_method_t*)root, sigtype, jl_emptysvec); - inst = jl_new_codeinst(mi, rt_ub, (jl_value_t*)jl_any_type, NULL, (jl_value_t*)ci, + inst = jl_new_codeinst(mi, jl_nothing, rt_ub, (jl_value_t*)jl_any_type, NULL, 
(jl_value_t*)ci, 0, world, world, 0, 0, jl_nothing, 0); jl_mi_cache_insert(mi, inst); diff --git a/src/staticdata.c b/src/staticdata.c index 8489fa116688e..0d38dddb79aaf 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -803,7 +803,6 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_ // so must not be present here record_field_change((jl_value_t**)&mi->uninferred, NULL); record_field_change((jl_value_t**)&mi->backedges, NULL); - record_field_change((jl_value_t**)&mi->callbacks, NULL); record_field_change((jl_value_t**)&mi->cache, NULL); } else { @@ -835,6 +834,11 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_ if (s->incremental && jl_is_code_instance(v)) { jl_code_instance_t *ci = (jl_code_instance_t*)v; // make sure we don't serialize other reachable cache entries of foreign methods + // Should this now be: + // if (ci !in ci->defs->cache) + // record_field_change((jl_value_t**)&ci->next, NULL); + // Why are we checking that the method/module this originates from is in_image? + // and then disconnect this CI? 
if (jl_object_in_image((jl_value_t*)ci->def->def.value)) { // TODO: if (ci in ci->defs->cache) record_field_change((jl_value_t**)&ci->next, NULL); @@ -2392,7 +2396,6 @@ static void strip_specializations_(jl_method_instance_t *mi) if (jl_options.strip_ir) { record_field_change((jl_value_t**)&mi->uninferred, NULL); record_field_change((jl_value_t**)&mi->backedges, NULL); - record_field_change((jl_value_t**)&mi->callbacks, NULL); } } diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index 199724e54ae00..18bdc97ece6b8 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -239,11 +239,7 @@ static jl_array_t *queue_external_cis(jl_array_t *list) assert(found == 0 || found == 1 || found == 2); assert(stack.len == 0); if (found == 1 && jl_atomic_load_relaxed(&ci->max_world) == ~(size_t)0) { - void **bp = ptrhash_bp(&visited, mi); - if (*bp != (void*)((char*)HT_NOTFOUND + 3)) { - *bp = (void*)((char*)HT_NOTFOUND + 3); - jl_array_ptr_1d_push(new_ext_cis, (jl_value_t*)ci); - } + jl_array_ptr_1d_push(new_ext_cis, (jl_value_t*)ci); } } } @@ -1162,13 +1158,23 @@ static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_targets, jl_a jl_code_instance_t *ci = (jl_code_instance_t*)jl_array_ptr_ref(ext_ci_list, i); if (jl_atomic_load_relaxed(&ci->max_world) == WORLD_AGE_REVALIDATION_SENTINEL) { assert(jl_atomic_load_relaxed(&ci->min_world) == minworld); - ptrhash_put(&cis_pending_validation, (void*)ci->def, (void*)ci); + void **bp = ptrhash_bp(&cis_pending_validation, (void*)ci->def); + assert(!jl_atomic_load_relaxed(&ci->next)); + if (*bp == HT_NOTFOUND) + *bp = (void*)ci; + else { + // Do ci->owner bifurcates the cache, we temporarily + // form a linked list of all the CI that need to be connected later + jl_code_instance_t *prev_ci = (jl_code_instance_t *)*bp; + jl_atomic_store_relaxed(&ci->next, prev_ci); + *bp = (void*)ci; + } } else { assert(jl_atomic_load_relaxed(&ci->min_world) == 1); assert(jl_atomic_load_relaxed(&ci->max_world) == 
~(size_t)0); jl_method_instance_t *caller = ci->def; - if (jl_atomic_load_relaxed(&ci->inferred) && jl_rettype_inferred(caller, minworld, ~(size_t)0) == jl_nothing) { + if (jl_atomic_load_relaxed(&ci->inferred) && jl_rettype_inferred(ci->owner, caller, minworld, ~(size_t)0) == jl_nothing) { jl_mi_cache_insert(caller, ci); } //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)caller); @@ -1211,18 +1217,26 @@ static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_targets, jl_a // Update any external CIs and add them to the cache. assert(jl_is_code_instance(ci)); jl_code_instance_t *codeinst = (jl_code_instance_t*)ci; - assert(jl_atomic_load_relaxed(&codeinst->min_world) == minworld); - assert(jl_atomic_load_relaxed(&codeinst->max_world) == WORLD_AGE_REVALIDATION_SENTINEL); - assert(jl_atomic_load_relaxed(&codeinst->inferred)); - jl_atomic_store_relaxed(&codeinst->max_world, maxvalid); - - if (jl_rettype_inferred(caller, minworld, maxvalid) != jl_nothing) { - // We already got a code instance for this world age range from somewhere else - we don't need - // this one. - continue; + while (codeinst) { + jl_code_instance_t *next_ci = jl_atomic_load_relaxed(&codeinst->next); + jl_atomic_store_relaxed(&codeinst->next, NULL); + + jl_value_t *owner = codeinst->owner; + JL_GC_PROMISE_ROOTED(owner); + + assert(jl_atomic_load_relaxed(&codeinst->min_world) == minworld); + assert(jl_atomic_load_relaxed(&codeinst->max_world) == WORLD_AGE_REVALIDATION_SENTINEL); + assert(jl_atomic_load_relaxed(&codeinst->inferred)); + jl_atomic_store_relaxed(&codeinst->max_world, maxvalid); + + if (jl_rettype_inferred(owner, caller, minworld, maxvalid) != jl_nothing) { + // We already got a code instance for this world age range from somewhere else - we don't need + // this one. + } else { + jl_mi_cache_insert(caller, codeinst); + } + codeinst = next_ci; } - - jl_mi_cache_insert(caller, codeinst); } else { // Likely internal. Find the CI already in the cache hierarchy. 
diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index cd779b8868a37..b7cbf8acec6ef 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -527,25 +527,7 @@ function find_start_brace(s::AbstractString; c_start='(', c_end=')') return (startind:lastindex(s), method_name_end) end -struct REPLInterpreterCache - dict::IdDict{MethodInstance,CodeInstance} -end -REPLInterpreterCache() = REPLInterpreterCache(IdDict{MethodInstance,CodeInstance}()) -const REPL_INTERPRETER_CACHE = REPLInterpreterCache() - -function get_code_cache() - # XXX Avoid storing analysis results into the cache that persists across precompilation, - # as [sys|pkg]image currently doesn't support serializing externally created `CodeInstance`. - # Otherwise, `CodeInstance`s created by `REPLInterpreter`, that are much less optimized - # that those produced by `NativeInterpreter`, will leak into the native code cache, - # potentially causing runtime slowdown. - # (see https://github.com/JuliaLang/julia/issues/48453). 
- if Base.generating_output() - return REPLInterpreterCache() - else - return REPL_INTERPRETER_CACHE - end -end +struct REPLCacheToken end struct REPLInterpreter <: CC.AbstractInterpreter limit_aggressive_inference::Bool @@ -553,7 +535,7 @@ struct REPLInterpreter <: CC.AbstractInterpreter inf_params::CC.InferenceParams opt_params::CC.OptimizationParams inf_cache::Vector{CC.InferenceResult} - code_cache::REPLInterpreterCache + code_cache::CC.InternalCodeCache function REPLInterpreter(limit_aggressive_inference::Bool=false; world::UInt = Base.get_world_counter(), inf_params::CC.InferenceParams = CC.InferenceParams(; @@ -561,7 +543,7 @@ struct REPLInterpreter <: CC.AbstractInterpreter unoptimize_throw_blocks=false), opt_params::CC.OptimizationParams = CC.OptimizationParams(), inf_cache::Vector{CC.InferenceResult} = CC.InferenceResult[], - code_cache::REPLInterpreterCache = get_code_cache()) + code_cache::CC.InternalCodeCache = CC.InternalCodeCache(REPLCacheToken())) return new(limit_aggressive_inference, world, inf_params, opt_params, inf_cache, code_cache) end end @@ -569,16 +551,8 @@ CC.InferenceParams(interp::REPLInterpreter) = interp.inf_params CC.OptimizationParams(interp::REPLInterpreter) = interp.opt_params CC.get_inference_world(interp::REPLInterpreter) = interp.world CC.get_inference_cache(interp::REPLInterpreter) = interp.inf_cache +CC.cache_owner(::REPLInterpreter) = REPLCacheToken() CC.code_cache(interp::REPLInterpreter) = CC.WorldView(interp.code_cache, CC.WorldRange(interp.world)) -CC.get(wvc::CC.WorldView{REPLInterpreterCache}, mi::MethodInstance, default) = get(wvc.cache.dict, mi, default) -CC.getindex(wvc::CC.WorldView{REPLInterpreterCache}, mi::MethodInstance) = getindex(wvc.cache.dict, mi) -CC.haskey(wvc::CC.WorldView{REPLInterpreterCache}, mi::MethodInstance) = haskey(wvc.cache.dict, mi) -function CC.setindex!(wvc::CC.WorldView{REPLInterpreterCache}, ci::CodeInstance, mi::MethodInstance) - CC.add_invalidation_callback!(mi) do 
replaced::MethodInstance, max_world::UInt32 - delete!(wvc.cache.dict, replaced) - end - return setindex!(wvc.cache.dict, ci, mi) -end # REPLInterpreter is only used for type analysis, so it should disable optimization entirely CC.may_optimize(::REPLInterpreter) = false diff --git a/test/compiler/AbstractInterpreter.jl b/test/compiler/AbstractInterpreter.jl index 77636bfba9a17..cbd4fbc51f1ad 100644 --- a/test/compiler/AbstractInterpreter.jl +++ b/test/compiler/AbstractInterpreter.jl @@ -368,7 +368,7 @@ let NoinlineModule = Module() # it should work for cached results method = only(methods(inlined_usually, (Float64,Float64,Float64,))) mi = CC.specialize_method(method, Tuple{typeof(inlined_usually),Float64,Float64,Float64}, Core.svec()) - @test haskey(interp.code_cache.dict, mi) + @test CC.haskey(CC.WorldView(interp.code_cache, interp.world), mi) let src = code_typed1((Float64,Float64,Float64); interp) do x, y, z inlined_usually(x, y, z) end @@ -442,7 +442,8 @@ function custom_lookup(mi::MethodInstance, min_world::UInt, max_world::UInt) end end end - return CONST_INVOKE_INTERP.code_cache.dict[mi] + # XXX: This seems buggy, custom_lookup should probably construct the absint on demand. 
+ return CC.getindex(CC.code_cache(CONST_INVOKE_INTERP), mi) end let # generate cache diff --git a/test/compiler/EscapeAnalysis/EAUtils.jl b/test/compiler/EscapeAnalysis/EAUtils.jl index 2a4f04f8f79ea..a673eca0548eb 100644 --- a/test/compiler/EscapeAnalysis/EAUtils.jl +++ b/test/compiler/EscapeAnalysis/EAUtils.jl @@ -71,11 +71,8 @@ using .CC: InferenceResult, OptimizationState, IRCode using .EA: analyze_escapes, ArgEscapeCache, EscapeInfo, EscapeState -struct CodeCache - cache::IdDict{MethodInstance,CodeInstance} -end -CodeCache() = CodeCache(IdDict{MethodInstance,CodeInstance}()) -const GLOBAL_CODE_CACHE = CodeCache() +struct EAToken end +const GLOBAL_CODE_CACHE = CC.InternalCodeCache(EAToken()) # when working outside of Core.Compiler, # cache entire escape state for later inspection and debugging @@ -86,7 +83,7 @@ struct EscapeCacheInfo end struct EscapeCache - cache::IdDict{MethodInstance,EscapeCacheInfo} + cache::IdDict{MethodInstance,EscapeCacheInfo} # TODO(aviatesk) Should this be CodeInstance to EscapeCacheInfo? 
end EscapeCache() = EscapeCache(IdDict{MethodInstance,EscapeCacheInfo}()) const GLOBAL_ESCAPE_CACHE = EscapeCache() @@ -102,12 +99,12 @@ mutable struct EscapeAnalyzer <: AbstractInterpreter const inf_params::InferenceParams const opt_params::OptimizationParams const inf_cache::Vector{InferenceResult} - const code_cache::CodeCache + const code_cache::CC.InternalCodeCache const escape_cache::EscapeCache const entry_mi::MethodInstance result::EscapeResultForEntry function EscapeAnalyzer(world::UInt, entry_mi::MethodInstance, - code_cache::CodeCache=GLOBAL_CODE_CACHE, + code_cache::CC.InternalCodeCache=GLOBAL_CODE_CACHE, escape_cache::EscapeCache=GLOBAL_ESCAPE_CACHE) inf_params = InferenceParams() opt_params = OptimizationParams() @@ -120,27 +117,11 @@ CC.InferenceParams(interp::EscapeAnalyzer) = interp.inf_params CC.OptimizationParams(interp::EscapeAnalyzer) = interp.opt_params CC.get_inference_world(interp::EscapeAnalyzer) = interp.world CC.get_inference_cache(interp::EscapeAnalyzer) = interp.inf_cache - -struct EscapeAnalyzerCacheView - code_cache::CodeCache - escape_cache::EscapeCache -end +CC.cache_owner(::EscapeAnalyzer) = EAToken() function CC.code_cache(interp::EscapeAnalyzer) worlds = WorldRange(CC.get_inference_world(interp)) - return WorldView(EscapeAnalyzerCacheView(interp.code_cache, interp.escape_cache), worlds) -end -CC.haskey(wvc::WorldView{EscapeAnalyzerCacheView}, mi::MethodInstance) = haskey(wvc.cache.code_cache.cache, mi) -CC.get(wvc::WorldView{EscapeAnalyzerCacheView}, mi::MethodInstance, default) = get(wvc.cache.code_cache.cache, mi, default) -CC.getindex(wvc::WorldView{EscapeAnalyzerCacheView}, mi::MethodInstance) = getindex(wvc.cache.code_cache.cache, mi) -function CC.setindex!(wvc::WorldView{EscapeAnalyzerCacheView}, ci::CodeInstance, mi::MethodInstance) - wvc.cache.code_cache.cache[mi] = ci - # register the callback on invalidation - CC.add_invalidation_callback!(mi) do replaced::MethodInstance, max_world::UInt32 - 
delete!(wvc.cache.code_cache.cache, replaced) - delete!(wvc.cache.escape_cache.cache, replaced) - end - return wvc + return WorldView(interp.code_cache, worlds) end function CC.ipo_dataflow_analysis!(interp::EscapeAnalyzer, ir::IRCode, caller::InferenceResult) diff --git a/test/compiler/invalidation.jl b/test/compiler/invalidation.jl index e3263156499c8..eaa7e2e6ea75b 100644 --- a/test/compiler/invalidation.jl +++ b/test/compiler/invalidation.jl @@ -10,48 +10,31 @@ const CC = Core.Compiler import Core: MethodInstance, CodeInstance import .CC: WorldRange, WorldView -struct InvalidationTesterCache - dict::IdDict{MethodInstance,CodeInstance} -end -InvalidationTesterCache() = InvalidationTesterCache(IdDict{MethodInstance,CodeInstance}()) - -const INVALIDATION_TESTER_CACHE = InvalidationTesterCache() +struct InvalidationTesterToken end +const INVALIDATION_TESTER_CACHE = Core.Compiler.InternalCodeCache(InvalidationTesterToken()) struct InvalidationTester <: CC.AbstractInterpreter world::UInt inf_params::CC.InferenceParams opt_params::CC.OptimizationParams inf_cache::Vector{CC.InferenceResult} - code_cache::InvalidationTesterCache + code_cache::Core.Compiler.InternalCodeCache function InvalidationTester(; world::UInt = Base.get_world_counter(), inf_params::CC.InferenceParams = CC.InferenceParams(), opt_params::CC.OptimizationParams = CC.OptimizationParams(), inf_cache::Vector{CC.InferenceResult} = CC.InferenceResult[], - code_cache::InvalidationTesterCache = INVALIDATION_TESTER_CACHE) + code_cache::Core.Compiler.InternalCodeCache = INVALIDATION_TESTER_CACHE) return new(world, inf_params, opt_params, inf_cache, code_cache) end end -struct InvalidationTesterCacheView - dict::IdDict{MethodInstance,CodeInstance} -end - CC.InferenceParams(interp::InvalidationTester) = interp.inf_params CC.OptimizationParams(interp::InvalidationTester) = interp.opt_params CC.get_inference_world(interp::InvalidationTester) = interp.world CC.get_inference_cache(interp::InvalidationTester) = 
interp.inf_cache -CC.code_cache(interp::InvalidationTester) = WorldView(InvalidationTesterCacheView(interp.code_cache.dict), WorldRange(interp.world)) -CC.get(wvc::WorldView{InvalidationTesterCacheView}, mi::MethodInstance, default) = get(wvc.cache.dict, mi, default) -CC.getindex(wvc::WorldView{InvalidationTesterCacheView}, mi::MethodInstance) = getindex(wvc.cache.dict, mi) -CC.haskey(wvc::WorldView{InvalidationTesterCacheView}, mi::MethodInstance) = haskey(wvc.cache.dict, mi) -function CC.setindex!(wvc::WorldView{InvalidationTesterCacheView}, ci::CodeInstance, mi::MethodInstance) - CC.add_invalidation_callback!(mi) do replaced::MethodInstance, max_world::UInt32 - delete!(wvc.cache.dict, replaced) - # Core.println("[InvalidationTester] ", replaced) # debug - end - setindex!(wvc.cache.dict, ci, mi) -end +CC.cache_owner(::InvalidationTester) = InvalidationTesterToken() +CC.code_cache(interp::InvalidationTester) = WorldView(interp.code_cache, interp.world) # basic functionality test # ------------------------ @@ -63,33 +46,55 @@ basic_caller(x) = basic_callee(x) @test Base.return_types((Float64,); interp=InvalidationTester()) do x basic_caller(x) end |> only === Float64 -@test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :basic_callee + +let mi = Base.method_instance(basic_callee, (Float64,)) + ci = mi.cache + @test !isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == typemax(UInt) end -@test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :basic_caller + +let mi = Base.method_instance(basic_caller, (Float64,)) + ci = mi.cache + @test !isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == typemax(UInt) end # this redefinition below should invalidate the cache +const BASIC_CALLER_WORLD = Base.get_world_counter() basic_callee(x) = x, x -@test !any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :basic_callee -end -@test 
!any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :basic_caller +@test !isdefined(Base.method_instance(basic_callee, (Float64,)), :cache) +let mi = Base.method_instance(basic_caller, (Float64,)) + ci = mi.cache + @test !isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == BASIC_CALLER_WORLD end # re-run inference and check the result is updated (and new cache exists) @test Base.return_types((Float64,); interp=InvalidationTester()) do x basic_caller(x) end |> only === Tuple{Float64,Float64} -@test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :basic_callee +let mi = Base.method_instance(basic_callee, (Float64,)) + ci = mi.cache + @test !isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == typemax(UInt) end -@test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :basic_caller + +let mi = Base.method_instance(basic_caller, (Float64,)) + ci = mi.cache + @test isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == typemax(UInt) + ci = ci.next + @test !isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world != typemax(UInt) end + # backedge optimization # --------------------- @@ -115,24 +120,47 @@ begin take!(GLOBAL_BUFFER) @test rt === Any @test any(iscall((src, pr48932_callee)), src.code) end - @test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :pr48932_callee + + let mi = only(Base.specializations(Base.only(Base.methods(pr48932_callee)))) + # Base.method_instance(pr48932_callee, (Any,)) + ci = mi.cache + @test isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == typemax(UInt) + + # In cache due to Base.return_types(pr48932_callee, (Any,)) + ci = ci.next + @test !isdefined(ci, :next) + @test ci.owner === nothing + @test ci.max_world == typemax(UInt) end - @test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - 
mi.def.name === :pr48932_caller + let mi = Base.method_instance(pr48932_caller, (Int,)) + ci = mi.cache + @test !isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == typemax(UInt) end + @test 42 == pr48932_caller(42) @test "42" == String(take!(GLOBAL_BUFFER)) # test that we didn't add the backedge from `pr48932_callee` to `pr48932_caller`: # this redefinition below should invalidate the cache of `pr48932_callee` but not that of `pr48932_caller` pr48932_callee(x) = (print(GLOBAL_BUFFER, x); nothing) - @test !any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :pr48932_callee - end - @test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :pr48932_caller + + @test isempty(Base.specializations(Base.only(Base.methods(pr48932_callee)))) + let mi = only(Base.specializations(Base.only(Base.methods(pr48932_caller)))) + # Base.method_instance(pr48932_callee, (Any,)) + ci = mi.cache + @test isdefined(ci, :next) + @test ci.owner === nothing + @test ci.max_world == typemax(UInt) + ci = ci.next + @test !isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == typemax(UInt) end + @test isnothing(pr48932_caller(42)) @test "42" == String(take!(GLOBAL_BUFFER)) end @@ -159,23 +187,41 @@ begin take!(GLOBAL_BUFFER) @test rt === Nothing @test any(iscall((src, pr48932_callee_inferable)), src.code) end - @test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :pr48932_callee_inferable + + let mi = only(Base.specializations(Base.only(Base.methods(pr48932_callee_inferable)))) + ci = mi.cache + @test isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == typemax(UInt) + ci = ci.next + @test !isdefined(ci, :next) + @test ci.owner === nothing + @test ci.max_world == typemax(UInt) end - @test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :pr48932_caller_unuse + let mi = Base.method_instance(pr48932_caller_unuse, (Int,)) 
+ ci = mi.cache + @test !isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == typemax(UInt) end + @test isnothing(pr48932_caller_unuse(42)) @test "42" == String(take!(GLOBAL_BUFFER)) # test that we didn't add the backedge from `pr48932_callee_inferable` to `pr48932_caller_unuse`: # this redefinition below should invalidate the cache of `pr48932_callee_inferable` but not that of `pr48932_caller_unuse` pr48932_callee_inferable(x) = (print(GLOBAL_BUFFER, "foo"); x) - @test !any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :pr48932_callee_inferable - end - @test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :pr48932_caller_unuse + + @test isempty(Base.specializations(Base.only(Base.methods(pr48932_callee_inferable)))) + let mi = Base.method_instance(pr48932_caller_unuse, (Int,)) + ci = mi.cache + @test isdefined(ci, :next) + @test ci.owner === nothing + @test ci.max_world == typemax(UInt) + ci = ci.next + @test !isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == typemax(UInt) end @test isnothing(pr48932_caller_unuse(42)) @test "foo" == String(take!(GLOBAL_BUFFER)) @@ -201,24 +247,43 @@ begin take!(GLOBAL_BUFFER) @test rt === Any @test any(isinvoke(:pr48932_callee_inlined), src.code) end - @test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :pr48932_callee_inlined + + let mi = Base.method_instance(pr48932_callee_inlined, (Int,)) + ci = mi.cache + @test isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world == typemax(UInt) + ci = ci.next + @test !isdefined(ci, :next) + @test ci.owner === nothing + @test ci.max_world == typemax(UInt) end - @test any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :pr48932_caller_inlined + let mi = Base.method_instance(pr48932_caller_inlined, (Int,)) + ci = mi.cache + @test !isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test 
ci.max_world == typemax(UInt) end + @test 42 == pr48932_caller_inlined(42) @test "42" == String(take!(GLOBAL_BUFFER)) # test that we added the backedge from `pr48932_callee_inlined` to `pr48932_caller_inlined`: # this redefinition below should invalidate the cache of `pr48932_callee_inlined` but not that of `pr48932_caller_inlined` @noinline pr48932_callee_inlined(@nospecialize x) = (print(GLOBAL_BUFFER, x); nothing) - @test !any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :pr48932_callee_inlined - end - @test !any(INVALIDATION_TESTER_CACHE.dict) do (mi, ci) - mi.def.name === :pr48932_caller_inlined + + @test isempty(Base.specializations(Base.only(Base.methods(pr48932_callee_inlined)))) + let mi = Base.method_instance(pr48932_caller_inlined, (Int,)) + ci = mi.cache + @test isdefined(ci, :next) + @test ci.owner === nothing + @test ci.max_world != typemax(UInt) + ci = ci.next + @test !isdefined(ci, :next) + @test ci.owner === InvalidationTesterToken() + @test ci.max_world != typemax(UInt) end + @test isnothing(pr48932_caller_inlined(42)) @test "42" == String(take!(GLOBAL_BUFFER)) end diff --git a/test/compiler/newinterp.jl b/test/compiler/newinterp.jl index 4b2cabcf33c3b..aeb49f6982c23 100644 --- a/test/compiler/newinterp.jl +++ b/test/compiler/newinterp.jl @@ -9,28 +9,24 @@ Defines new `NewInterpreter <: AbstractInterpreter` whose cache is separated from the native code cache, satisfying the minimum interface requirements. 
""" macro newinterp(InterpName) - InterpCacheName = esc(Symbol(string(InterpName, "Cache"))) + InterpCacheName = QuoteNode(Symbol(string(InterpName, "Cache"))) InterpName = esc(InterpName) C = Core CC = Core.Compiler quote - struct $InterpCacheName - dict::IdDict{$C.MethodInstance,$C.CodeInstance} - end - $InterpCacheName() = $InterpCacheName(IdDict{$C.MethodInstance,$C.CodeInstance}()) struct $InterpName <: $CC.AbstractInterpreter meta # additional information world::UInt inf_params::$CC.InferenceParams opt_params::$CC.OptimizationParams inf_cache::Vector{$CC.InferenceResult} - code_cache::$InterpCacheName + code_cache::$CC.InternalCodeCache function $InterpName(meta = nothing; world::UInt = Base.get_world_counter(), inf_params::$CC.InferenceParams = $CC.InferenceParams(), opt_params::$CC.OptimizationParams = $CC.OptimizationParams(), inf_cache::Vector{$CC.InferenceResult} = $CC.InferenceResult[], - code_cache::$InterpCacheName = $InterpCacheName()) + code_cache::$CC.InternalCodeCache = $CC.InternalCodeCache($InterpCacheName)) return new(meta, world, inf_params, opt_params, inf_cache, code_cache) end end @@ -39,9 +35,6 @@ macro newinterp(InterpName) $CC.get_inference_world(interp::$InterpName) = interp.world $CC.get_inference_cache(interp::$InterpName) = interp.inf_cache $CC.code_cache(interp::$InterpName) = $CC.WorldView(interp.code_cache, $CC.WorldRange(interp.world)) - $CC.get(wvc::$CC.WorldView{$InterpCacheName}, mi::$C.MethodInstance, default) = get(wvc.cache.dict, mi, default) - $CC.getindex(wvc::$CC.WorldView{$InterpCacheName}, mi::$C.MethodInstance) = getindex(wvc.cache.dict, mi) - $CC.haskey(wvc::$CC.WorldView{$InterpCacheName}, mi::$C.MethodInstance) = haskey(wvc.cache.dict, mi) - $CC.setindex!(wvc::$CC.WorldView{$InterpCacheName}, ci::$C.CodeInstance, mi::$C.MethodInstance) = setindex!(wvc.cache.dict, ci, mi) + $CC.cache_owner(::$InterpName) = $InterpCacheName end end diff --git a/test/core.jl b/test/core.jl index a2c3b3cff9e6b..504cfe99ba68c 100644 --- 
a/test/core.jl +++ b/test/core.jl @@ -14,7 +14,7 @@ include("testenv.jl") # sanity tests that our built-in types are marked correctly for const fields for (T, c) in ( (Core.CodeInfo, []), - (Core.CodeInstance, [:def, :rettype, :exctype, :rettype_const, :ipo_purity_bits, :analysis_results]), + (Core.CodeInstance, [:def, :owner, :rettype, :exctype, :rettype_const, :ipo_purity_bits, :analysis_results]), (Core.Method, [#=:name, :module, :file, :line, :primary_world, :sig, :slot_syms, :external_mt, :nargs, :called, :nospecialize, :nkw, :isva, :is_for_opaque_closure, :constprop=#]), (Core.MethodInstance, [#=:def, :specTypes, :sparam_vals=#]), (Core.MethodTable, [:module]), diff --git a/test/precompile.jl b/test/precompile.jl index 1eed9b1418a63..c05ad7b9c61b6 100644 --- a/test/precompile.jl +++ b/test/precompile.jl @@ -1693,13 +1693,14 @@ precompile_test_harness("Issue #46558") do load_path @test (@eval $Foo.foo(1)) == 2 end +# TODO: Decide if we need to keep supporting this. precompile_test_harness("issue #46296") do load_path write(joinpath(load_path, "CodeInstancePrecompile.jl"), """ module CodeInstancePrecompile mi = first(Base.specializations(first(methods(identity)))) - ci = Core.CodeInstance(mi, Any, Any, nothing, nothing, zero(Int32), typemin(UInt), + ci = Core.CodeInstance(mi, nothing, Any, Any, nothing, nothing, zero(Int32), typemin(UInt), typemax(UInt), zero(UInt32), zero(UInt32), nothing, 0x00) __init__() = @assert ci isa Core.CodeInstance @@ -1710,6 +1711,75 @@ precompile_test_harness("issue #46296") do load_path (@eval (using CodeInstancePrecompile)) end +precompile_test_harness("AbstractInterpreter caching") do load_path + write(joinpath(load_path, "SimpleModule.jl"), + """ + module SimpleModule + basic_callee(x) = x + basic_caller(x) = basic_callee(x) + end + """) + write(joinpath(load_path, "CustomAbstractInterpreterCaching.jl"), + """ + module CustomAbstractInterpreterCaching + import SimpleModule: basic_caller, basic_callee + module Custom + const CC = 
Core.Compiler + import Core: MethodInstance, CodeInstance + import .CC: WorldRange, WorldView + + struct InvalidationTesterToken end + const INVALIDATION_TESTER_CACHE = Core.Compiler.InternalCodeCache(InvalidationTesterToken()) + + struct InvalidationTester <: CC.AbstractInterpreter + world::UInt + inf_params::CC.InferenceParams + opt_params::CC.OptimizationParams + inf_cache::Vector{CC.InferenceResult} + code_cache::Core.Compiler.InternalCodeCache + function InvalidationTester(; + world::UInt = Base.get_world_counter(), + inf_params::CC.InferenceParams = CC.InferenceParams(), + opt_params::CC.OptimizationParams = CC.OptimizationParams(), + inf_cache::Vector{CC.InferenceResult} = CC.InferenceResult[], + code_cache::Core.Compiler.InternalCodeCache = INVALIDATION_TESTER_CACHE) + return new(world, inf_params, opt_params, inf_cache, code_cache) + end + end + + CC.InferenceParams(interp::InvalidationTester) = interp.inf_params + CC.OptimizationParams(interp::InvalidationTester) = interp.opt_params + CC.get_world_counter(interp::InvalidationTester) = interp.world + CC.get_inference_cache(interp::InvalidationTester) = interp.inf_cache + CC.cache_owner(::InvalidationTester) = InvalidationTesterToken() + CC.code_cache(interp::InvalidationTester) = WorldView(interp.code_cache, interp.world) + end + + Base.return_types((Float64,)) do x + basic_caller(x) + end + Base.return_types((Float64,); interp=Custom.InvalidationTester()) do x + basic_caller(x) + end + end + """) + Base.compilecache(Base.PkgId("CustomAbstractInterpreterCaching")) + (@eval begin + using CustomAbstractInterpreterCaching + let m = only(methods(CustomAbstractInterpreterCaching.basic_callee)) + mi = only(Base.specializations(m)) + ci = mi.cache + @test isdefined(ci, :next) + @test ci.owner === nothing + @test ci.max_world == typemax(UInt) + ci = ci.next + @test !isdefined(ci, :next) + @test ci.owner === CustomAbstractInterpreterCaching.Custom.InvalidationTesterToken() + @test ci.max_world == typemax(UInt) + 
end + end) +end + precompile_test_harness("Recursive types") do load_path write(joinpath(load_path, "RecursiveTypeDef.jl"), """ diff --git a/test/reflection.jl b/test/reflection.jl index f81605622a806..c4e941fa7bb1a 100644 --- a/test/reflection.jl +++ b/test/reflection.jl @@ -1006,8 +1006,9 @@ end @testset "lookup mi" begin @test 1+1 == 2 - mi1 = @ccall jl_method_lookup_by_tt(Tuple{typeof(+), Int, Int}::Any, Base.get_world_counter()::Csize_t, nothing::Any)::Ref{Core.MethodInstance} + mi1 = Base.method_instance(+, (Int, Int)) @test mi1.def.name == :+ + # Note `jl_method_lookup` doesn't return C_NULL if not found mi2 = @ccall jl_method_lookup(Any[+, 1, 1]::Ptr{Any}, 3::Csize_t, Base.get_world_counter()::Csize_t)::Ref{Core.MethodInstance} @test mi1 == mi2 end From d5c5248df143c3b60386b101449dd56b6c79534d Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki Date: Fri, 9 Feb 2024 01:16:49 +0900 Subject: [PATCH 2/3] remove `code_cache` API requirement --- base/compiler/cicache.jl | 8 ++++++-- base/compiler/types.jl | 2 -- stdlib/REPL/src/REPLCompletions.jl | 7 ++----- test/compiler/AbstractInterpreter.jl | 2 +- test/compiler/EscapeAnalysis/EAUtils.jl | 12 ++---------- test/compiler/invalidation.jl | 10 ++-------- test/compiler/newinterp.jl | 7 ++----- test/precompile.jl | 10 ++-------- 8 files changed, 17 insertions(+), 41 deletions(-) diff --git a/base/compiler/cicache.jl b/base/compiler/cicache.jl index fbd9530e74f7b..a6ed18fe5105f 100644 --- a/base/compiler/cicache.jl +++ b/base/compiler/cicache.jl @@ -17,8 +17,6 @@ function setindex!(cache::InternalCodeCache, ci::CodeInstance, mi::MethodInstanc return cache end -const GLOBAL_CI_CACHE = InternalCodeCache(nothing) - struct WorldRange min_world::UInt max_world::UInt @@ -73,3 +71,9 @@ function setindex!(wvc::WorldView{InternalCodeCache}, ci::CodeInstance, mi::Meth setindex!(wvc.cache, ci, mi) return wvc end + +function code_cache(interp::AbstractInterpreter) + cache = InternalCodeCache(cache_owner(interp)) + worlds =
WorldRange(get_inference_world(interp)) + return WorldView(cache, worlds) +end diff --git a/base/compiler/types.jl b/base/compiler/types.jl index fe6e21621f8fd..642a7ac551662 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -15,7 +15,6 @@ the following methods to satisfy the `AbstractInterpreter` API requirement: - `get_inference_world(interp::NewInterpreter)` - return the world age for this interpreter - `get_inference_cache(interp::NewInterpreter)` - return the local inference cache - `cache_owner(interp::NewInterpreter)` - return the owner of any new cache entries -- `code_cache(interp::NewInterpreter)` - return the global inference cache """ :(AbstractInterpreter) @@ -405,7 +404,6 @@ InferenceParams(interp::NativeInterpreter) = interp.inf_params OptimizationParams(interp::NativeInterpreter) = interp.opt_params get_inference_world(interp::NativeInterpreter) = interp.world get_inference_cache(interp::NativeInterpreter) = interp.inf_cache -code_cache(interp::NativeInterpreter) = WorldView(GLOBAL_CI_CACHE, get_inference_world(interp)) cache_owner(interp::NativeInterpreter) = nothing """ diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index b7cbf8acec6ef..215814dc8cada 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -535,16 +535,14 @@ struct REPLInterpreter <: CC.AbstractInterpreter inf_params::CC.InferenceParams opt_params::CC.OptimizationParams inf_cache::Vector{CC.InferenceResult} - code_cache::CC.InternalCodeCache function REPLInterpreter(limit_aggressive_inference::Bool=false; world::UInt = Base.get_world_counter(), inf_params::CC.InferenceParams = CC.InferenceParams(; aggressive_constant_propagation=true, unoptimize_throw_blocks=false), opt_params::CC.OptimizationParams = CC.OptimizationParams(), - inf_cache::Vector{CC.InferenceResult} = CC.InferenceResult[], - code_cache::CC.InternalCodeCache = CC.InternalCodeCache(REPLCacheToken())) - return 
new(limit_aggressive_inference, world, inf_params, opt_params, inf_cache, code_cache) + inf_cache::Vector{CC.InferenceResult} = CC.InferenceResult[]) + return new(limit_aggressive_inference, world, inf_params, opt_params, inf_cache) end end CC.InferenceParams(interp::REPLInterpreter) = interp.inf_params @@ -552,7 +550,6 @@ CC.OptimizationParams(interp::REPLInterpreter) = interp.opt_params CC.get_inference_world(interp::REPLInterpreter) = interp.world CC.get_inference_cache(interp::REPLInterpreter) = interp.inf_cache CC.cache_owner(::REPLInterpreter) = REPLCacheToken() -CC.code_cache(interp::REPLInterpreter) = CC.WorldView(interp.code_cache, CC.WorldRange(interp.world)) # REPLInterpreter is only used for type analysis, so it should disable optimization entirely CC.may_optimize(::REPLInterpreter) = false diff --git a/test/compiler/AbstractInterpreter.jl b/test/compiler/AbstractInterpreter.jl index cbd4fbc51f1ad..c0b320009b8ec 100644 --- a/test/compiler/AbstractInterpreter.jl +++ b/test/compiler/AbstractInterpreter.jl @@ -368,7 +368,7 @@ let NoinlineModule = Module() # it should work for cached results method = only(methods(inlined_usually, (Float64,Float64,Float64,))) mi = CC.specialize_method(method, Tuple{typeof(inlined_usually),Float64,Float64,Float64}, Core.svec()) - @test CC.haskey(CC.WorldView(interp.code_cache, interp.world), mi) + @test CC.haskey(CC.code_cache(interp), mi) let src = code_typed1((Float64,Float64,Float64); interp) do x, y, z inlined_usually(x, y, z) end diff --git a/test/compiler/EscapeAnalysis/EAUtils.jl b/test/compiler/EscapeAnalysis/EAUtils.jl index a673eca0548eb..87228aaf2858a 100644 --- a/test/compiler/EscapeAnalysis/EAUtils.jl +++ b/test/compiler/EscapeAnalysis/EAUtils.jl @@ -62,7 +62,7 @@ __clear_cache!() = empty!(GLOBAL_EA_CODE_CACHE) # imports import .CC: AbstractInterpreter, NativeInterpreter, WorldView, WorldRange, - InferenceParams, OptimizationParams, get_world_counter, get_inference_cache, code_cache, + InferenceParams, 
OptimizationParams, get_world_counter, get_inference_cache, ipo_dataflow_analysis!, cache_result! # usings using Core: @@ -72,7 +72,6 @@ using .CC: using .EA: analyze_escapes, ArgEscapeCache, EscapeInfo, EscapeState struct EAToken end -const GLOBAL_CODE_CACHE = CC.InternalCodeCache(EAToken()) # when working outside of Core.Compiler, # cache entire escape state for later inspection and debugging @@ -99,17 +98,15 @@ mutable struct EscapeAnalyzer <: AbstractInterpreter const inf_params::InferenceParams const opt_params::OptimizationParams const inf_cache::Vector{InferenceResult} - const code_cache::CC.InternalCodeCache const escape_cache::EscapeCache const entry_mi::MethodInstance result::EscapeResultForEntry function EscapeAnalyzer(world::UInt, entry_mi::MethodInstance, - code_cache::CC.InternalCodeCache=GLOBAL_CODE_CACHE, escape_cache::EscapeCache=GLOBAL_ESCAPE_CACHE) inf_params = InferenceParams() opt_params = OptimizationParams() inf_cache = InferenceResult[] - return new(world, inf_params, opt_params, inf_cache, code_cache, escape_cache, entry_mi) + return new(world, inf_params, opt_params, inf_cache, escape_cache, entry_mi) end end @@ -119,11 +116,6 @@ CC.get_inference_world(interp::EscapeAnalyzer) = interp.world CC.get_inference_cache(interp::EscapeAnalyzer) = interp.inf_cache CC.cache_owner(::EscapeAnalyzer) = EAToken() -function CC.code_cache(interp::EscapeAnalyzer) - worlds = WorldRange(CC.get_inference_world(interp)) - return WorldView(interp.code_cache, worlds) -end - function CC.ipo_dataflow_analysis!(interp::EscapeAnalyzer, ir::IRCode, caller::InferenceResult) # run EA on all frames that have been optimized nargs = let def = caller.linfo.def; isa(def, Method) ? 
Int(def.nargs) : 0; end diff --git a/test/compiler/invalidation.jl b/test/compiler/invalidation.jl index eaa7e2e6ea75b..d5e86ca533f05 100644 --- a/test/compiler/invalidation.jl +++ b/test/compiler/invalidation.jl @@ -7,25 +7,20 @@ include("irutils.jl") using Test const CC = Core.Compiler -import Core: MethodInstance, CodeInstance -import .CC: WorldRange, WorldView struct InvalidationTesterToken end -const INVALIDATION_TESTER_CACHE = Core.Compiler.InternalCodeCache(InvalidationTesterToken()) struct InvalidationTester <: CC.AbstractInterpreter world::UInt inf_params::CC.InferenceParams opt_params::CC.OptimizationParams inf_cache::Vector{CC.InferenceResult} - code_cache::Core.Compiler.InternalCodeCache function InvalidationTester(; world::UInt = Base.get_world_counter(), inf_params::CC.InferenceParams = CC.InferenceParams(), opt_params::CC.OptimizationParams = CC.OptimizationParams(), - inf_cache::Vector{CC.InferenceResult} = CC.InferenceResult[], - code_cache::Core.Compiler.InternalCodeCache = INVALIDATION_TESTER_CACHE) - return new(world, inf_params, opt_params, inf_cache, code_cache) + inf_cache::Vector{CC.InferenceResult} = CC.InferenceResult[]) + return new(world, inf_params, opt_params, inf_cache) end end @@ -34,7 +29,6 @@ CC.OptimizationParams(interp::InvalidationTester) = interp.opt_params CC.get_inference_world(interp::InvalidationTester) = interp.world CC.get_inference_cache(interp::InvalidationTester) = interp.inf_cache CC.cache_owner(::InvalidationTester) = InvalidationTesterToken() -CC.code_cache(interp::InvalidationTester) = WorldView(interp.code_cache, interp.world) # basic functionality test # ------------------------ diff --git a/test/compiler/newinterp.jl b/test/compiler/newinterp.jl index aeb49f6982c23..1157a52ebbcbe 100644 --- a/test/compiler/newinterp.jl +++ b/test/compiler/newinterp.jl @@ -20,21 +20,18 @@ macro newinterp(InterpName) inf_params::$CC.InferenceParams opt_params::$CC.OptimizationParams inf_cache::Vector{$CC.InferenceResult} - 
code_cache::$CC.InternalCodeCache function $InterpName(meta = nothing; world::UInt = Base.get_world_counter(), inf_params::$CC.InferenceParams = $CC.InferenceParams(), opt_params::$CC.OptimizationParams = $CC.OptimizationParams(), - inf_cache::Vector{$CC.InferenceResult} = $CC.InferenceResult[], - code_cache::$CC.InternalCodeCache = $CC.InternalCodeCache($InterpCacheName)) - return new(meta, world, inf_params, opt_params, inf_cache, code_cache) + inf_cache::Vector{$CC.InferenceResult} = $CC.InferenceResult[]) + return new(meta, world, inf_params, opt_params, inf_cache) end end $CC.InferenceParams(interp::$InterpName) = interp.inf_params $CC.OptimizationParams(interp::$InterpName) = interp.opt_params $CC.get_inference_world(interp::$InterpName) = interp.world $CC.get_inference_cache(interp::$InterpName) = interp.inf_cache - $CC.code_cache(interp::$InterpName) = $CC.WorldView(interp.code_cache, $CC.WorldRange(interp.world)) $CC.cache_owner(::$InterpName) = $InterpCacheName end end diff --git a/test/precompile.jl b/test/precompile.jl index c05ad7b9c61b6..25e28ae1bc3f0 100644 --- a/test/precompile.jl +++ b/test/precompile.jl @@ -1725,25 +1725,20 @@ precompile_test_harness("AbstractInterpreter caching") do load_path import SimpleModule: basic_caller, basic_callee module Custom const CC = Core.Compiler - import Core: MethodInstance, CodeInstance - import .CC: WorldRange, WorldView struct InvalidationTesterToken end - const INVALIDATION_TESTER_CACHE = Core.Compiler.InternalCodeCache(InvalidationTesterToken()) struct InvalidationTester <: CC.AbstractInterpreter world::UInt inf_params::CC.InferenceParams opt_params::CC.OptimizationParams inf_cache::Vector{CC.InferenceResult} - code_cache::Core.Compiler.InternalCodeCache function InvalidationTester(; world::UInt = Base.get_world_counter(), inf_params::CC.InferenceParams = CC.InferenceParams(), opt_params::CC.OptimizationParams = CC.OptimizationParams(), - inf_cache::Vector{CC.InferenceResult} = CC.InferenceResult[], - 
code_cache::Core.Compiler.InternalCodeCache = INVALIDATION_TESTER_CACHE) - return new(world, inf_params, opt_params, inf_cache, code_cache) + inf_cache::Vector{CC.InferenceResult} = CC.InferenceResult[]) + return new(world, inf_params, opt_params, inf_cache) end end @@ -1752,7 +1747,6 @@ precompile_test_harness("AbstractInterpreter caching") do load_path CC.get_world_counter(interp::InvalidationTester) = interp.world CC.get_inference_cache(interp::InvalidationTester) = interp.inf_cache CC.cache_owner(::InvalidationTester) = InvalidationTesterToken() - CC.code_cache(interp::InvalidationTester) = WorldView(interp.code_cache, interp.world) end Base.return_types((Float64,)) do x From 7956e4d6882f311de567614e4215f26c9137cc73 Mon Sep 17 00:00:00 2001 From: Valentin Churavy Date: Thu, 8 Feb 2024 15:14:01 -0500 Subject: [PATCH 3/3] fixup! Tag CodeInstance for external abstract interpreter --- test/precompile.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/precompile.jl b/test/precompile.jl index 25e28ae1bc3f0..a7f69dad9f2af 100644 --- a/test/precompile.jl +++ b/test/precompile.jl @@ -1744,7 +1744,7 @@ precompile_test_harness("AbstractInterpreter caching") do load_path CC.InferenceParams(interp::InvalidationTester) = interp.inf_params CC.OptimizationParams(interp::InvalidationTester) = interp.opt_params - CC.get_world_counter(interp::InvalidationTester) = interp.world + CC.get_inference_world(interp::InvalidationTester) = interp.world CC.get_inference_cache(interp::InvalidationTester) = interp.inf_cache CC.cache_owner(::InvalidationTester) = InvalidationTesterToken() end