diff --git a/NEWS.md b/NEWS.md
index b964900fa896e..63d22b8be96e4 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -22,6 +22,9 @@ Language changes
for `.*` etcetera. This also means that "dot operations" automatically
fuse into a single loop, along with other dot calls `f.(x)`. ([#17623])
+ * Newly defined methods are no longer callable from the same dynamic runtime
+ scope they were defined in ([#17057]).
+
Breaking changes
----------------
@@ -50,6 +53,10 @@ This section lists changes that do not have deprecation warnings.
* `broadcast` now treats `Ref` (except for `Ptr`) arguments as 0-dimensional
arrays ([#18965]).
+ * The runtime now enforces when new method definitions can take effect ([#17057]).
+    The flip side of this is that new method definitions now reliably take
+    effect and are called when evaluating new code ([#265]).
+
Library improvements
--------------------
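
As a quick illustration of the NEWS entries above, here is a minimal sketch of how the new world-age rule behaves (the function names are illustrative, not part of this patch):

```julia
f() = 1
function g()
    eval(:(f() = 2))   # defines a new method in a newer world
    return f()         # g() keeps running in its original world, so this is still 1
end
g()   # -> 1
f()   # -> 2: a fresh top-level call runs in the newest world
```

A running function keeps dispatching in the world it started in; only code evaluated afterwards (or routed through `eval`) sees the newly added method.
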
diff --git a/base/REPL.jl b/base/REPL.jl
index 4c1bfd799db4d..7bdebcc6cf676 100644
--- a/base/REPL.jl
+++ b/base/REPL.jl
@@ -66,7 +66,7 @@ function eval_user_input(ast::ANY, backend::REPLBackend)
value = eval(Main, ast)
backend.in_eval = false
# note: value wrapped in a closure to ensure it doesn't get passed through expand
- eval(Main, Expr(:(=), :ans, Expr(:call, ()->value)))
+ eval(Main, Expr(:body, Expr(:(=), :ans, QuoteNode(value)), Expr(:return, nothing)))
put!(backend.response_channel, (value, nothing))
end
break
@@ -153,9 +153,9 @@ function print_response(errio::IO, val::ANY, bt, show_value::Bool, have_color::B
if val !== nothing && show_value
try
if specialdisplay === nothing
- display(val)
+ eval(Main, Expr(:body, Expr(:return, Expr(:call, display, QuoteNode(val)))))
else
- display(specialdisplay,val)
+ eval(Main, Expr(:body, Expr(:return, Expr(:call, specialdisplay, QuoteNode(val)))))
end
catch err
println(errio, "Error showing value of type ", typeof(val), ":")
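
The rewritten REPL lines all use the same trick: wrap the call in a fresh top-level thunk so it is evaluated in the newest world age, where any `display` methods defined by the input just evaluated are visible. A hedged, stand-alone sketch of the pattern, with `val` as a placeholder value:

```julia
val = 1:3   # placeholder for the value the REPL just computed
# Evaluating this thunk in Main runs `display` in the latest world age,
# so a `display` method defined moments ago is still found.
eval(Main, Expr(:body, Expr(:return, Expr(:call, display, QuoteNode(val)))))
```
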
diff --git a/base/REPLCompletions.jl b/base/REPLCompletions.jl
index 99f6b50428e1a..03e84e0381168 100644
--- a/base/REPLCompletions.jl
+++ b/base/REPLCompletions.jl
@@ -281,11 +281,12 @@ function get_type_call(expr::Expr)
found ? push!(args, typ) : push!(args, Any)
end
# use _methods_by_ftype as the function is supplied as a type
- mt = Base._methods_by_ftype(Tuple{ft, args...}, -1)
+ world = typemax(UInt)
+ mt = Base._methods_by_ftype(Tuple{ft, args...}, -1, world)
length(mt) == 1 || return (Any, false)
m = first(mt)
# Typeinference
- params = Core.Inference.InferenceParams()
+ params = Core.Inference.InferenceParams(world)
return_type = Core.Inference.typeinf_type(m[3], m[1], m[2], true, params)
return_type === nothing && return (Any, false)
return (return_type, true)
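
For context, a sketch of the internal lookup being updated here: `_methods_by_ftype` now takes a world-age argument that selects which snapshot of the method tables to query, and `typemax(UInt)` means the newest world (internal API, shown for illustration only):

```julia
world = typemax(UInt)
matches = Base._methods_by_ftype(Tuple{typeof(sin), Float64}, -1, world)
for m in matches
    println(m[3])   # each match is a (signature, static params, Method) triple
end
```
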
diff --git a/base/base.jl b/base/base.jl
index 9d9c41cac0fad..b349101bff15b 100644
--- a/base/base.jl
+++ b/base/base.jl
@@ -57,7 +57,10 @@ Alternatively, there is no unique most-specific method.
type MethodError <: Exception
f
args
+ world::UInt
+ MethodError(f::ANY, args::ANY, world::UInt) = new(f, args, world)
end
+MethodError(f::ANY, args::ANY) = MethodError(f, args, typemax(UInt))
"""
EOFError()
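
A small sketch of what the new field means in practice: a `MethodError` now records the world age in which dispatch failed, and the two-argument convenience constructor defaults to the newest possible world:

```julia
err = MethodError(sin, ("not a number",))
err.world == typemax(UInt)   # -> true for the convenience constructor
```
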
diff --git a/base/boot.jl b/base/boot.jl
index f487b01279f37..c9e1b1a800c00 100644
--- a/base/boot.jl
+++ b/base/boot.jl
@@ -308,6 +308,7 @@ unsafe_convert{T}(::Type{T}, x::T) = x
typealias NTuple{N,T} Tuple{Vararg{T,N}}
+
# primitive array constructors
(::Type{Array{T,N}}){T,N}(d::NTuple{N,Int}) =
ccall(:jl_new_array, Array{T,N}, (Any,Any), Array{T,N}, d)
@@ -338,6 +339,15 @@ Array{T}(::Type{T}, m::Int,n::Int) = Array{T,2}(m,n)
Array{T}(::Type{T}, m::Int,n::Int,o::Int) = Array{T,3}(m,n,o)
+# primitive Symbol constructors
+Symbol(s::String) = Symbol(s.data)
+function Symbol(a::Array{UInt8,1})
+ return ccall(:jl_symbol_n, Ref{Symbol}, (Ptr{UInt8}, Int),
+ ccall(:jl_array_ptr, Ptr{UInt8}, (Any,), a),
+ Intrinsics.arraylen(a))
+end
+
+
# docsystem basics
macro doc(x...)
atdoc(x...)
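
An illustrative check of the primitive constructors added above (the example values are made up):

```julia
Symbol("foo") === :foo                 # String constructor
Symbol(UInt8['f', 'o', 'o']) === :foo  # byte-vector constructor defined in boot.jl
```
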
diff --git a/base/client.jl b/base/client.jl
index eff0589986a0c..5b1e35fcb1e5b 100644
--- a/base/client.jl
+++ b/base/client.jl
@@ -142,12 +142,13 @@ function eval_user_input(ast::ANY, show_value)
else
ast = expand(ast)
value = eval(Main, ast)
- eval(Main, Expr(:(=), :ans, Expr(:call, ()->value)))
- if value!==nothing && show_value
+ eval(Main, Expr(:body, Expr(:(=), :ans, QuoteNode(value)), Expr(:return, nothing)))
+ if !(value === nothing) && show_value
if have_color
print(answer_color())
end
- try display(value)
+ try
+ eval(Main, Expr(:body, Expr(:return, Expr(:call, display, QuoteNode(value)))))
catch err
println(STDERR, "Evaluation succeeded, but an error occurred while showing value of type ", typeof(value), ":")
rethrow(err)
diff --git a/base/coreimg.jl b/base/coreimg.jl
index 9f7b4da180552..2e2aa989048a6 100644
--- a/base/coreimg.jl
+++ b/base/coreimg.jl
@@ -6,14 +6,16 @@ import Core: print, println, show, write, unsafe_write, STDOUT, STDERR
ccall(:jl_set_istopmod, Void, (Bool,), false)
-eval(x) = Core.eval(Inference,x)
-eval(m,x) = Core.eval(m,x)
+eval(x) = Core.eval(Inference, x)
+eval(m, x) = Core.eval(m, x)
-include = Core.include
+const include = Core.include
+# conditional to allow redefining Core.Inference after base exists
+isdefined(Main, :Base) || ((::Type{T}){T}(arg) = convert(T, arg)::T)
## Load essential files and libraries
-include("ctypes.jl")
include("essentials.jl")
+include("ctypes.jl")
include("generator.jl")
include("reflection.jl")
include("options.jl")
@@ -33,15 +35,6 @@ include("operators.jl")
include("pointer.jl")
const checked_add = +
const checked_sub = -
-if !isdefined(Main, :Base)
- # conditional to allow redefining Core.Inference after base exists
- (::Type{T}){T}(arg) = convert(T, arg)::T
-end
-
-# Symbol constructors
-Symbol(s::String) = Symbol(s.data)
-Symbol(a::Array{UInt8,1}) =
- ccall(:jl_symbol_n, Ref{Symbol}, (Ptr{UInt8}, Int32), a, length(a))
# core array operations
include("array.jl")
diff --git a/base/docs/core.jl b/base/docs/core.jl
index 4934f4e43aaa4..81993ae2ef2c3 100644
--- a/base/docs/core.jl
+++ b/base/docs/core.jl
@@ -7,7 +7,7 @@ import ..esc, ..push!, ..getindex, ..current_module, ..unsafe_load, ..Csize_t
function doc!(str, ex)
ptr = unsafe_load(Core.Intrinsics.cglobal(:jl_filename, Ptr{UInt8}))
len = ccall(:strlen, Csize_t, (Ptr{UInt8},), ptr)
- file = ccall(:jl_symbol_n, Any, (Ptr{UInt8}, Int32), ptr, len)
+ file = ccall(:jl_symbol_n, Any, (Ptr{UInt8}, Csize_t), ptr, len)
line = unsafe_load(Core.Intrinsics.cglobal(:jl_lineno, Int32)) # Cint
push!(DOCS, (current_module(), ex, str, file, line))
end
diff --git a/base/error.jl b/base/error.jl
index 9beef3b93bdd7..6e9e6cac867c9 100644
--- a/base/error.jl
+++ b/base/error.jl
@@ -38,7 +38,9 @@ systemerror(p, b::Bool; extrainfo=nothing) = b ? throw(Main.Base.SystemError(str
assert(x) = x ? nothing : throw(Main.Base.AssertionError())
macro assert(ex, msgs...)
msg = isempty(msgs) ? ex : msgs[1]
- if !isempty(msgs) && (isa(msg, Expr) || isa(msg, Symbol))
+ if isa(msg, AbstractString)
+ msg = msg # pass-through
+ elseif !isempty(msgs) && (isa(msg, Expr) || isa(msg, Symbol))
# message is an expression needing evaluating
msg = :(Main.Base.string($(esc(msg))))
elseif isdefined(Main, :Base) && isdefined(Main.Base, :string)
@@ -47,7 +49,7 @@ macro assert(ex, msgs...)
# string() might not be defined during bootstrap
msg = :(Main.Base.string($(Expr(:quote,msg))))
end
- :($(esc(ex)) ? $(nothing) : throw(Main.Base.AssertionError($msg)))
+ return :($(esc(ex)) ? $(nothing) : throw(Main.Base.AssertionError($msg)))
end
# NOTE: Please keep the constant values specified below in sync with the doc string
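
With the new `AbstractString` branch, a literal string message is passed through untouched instead of being re-wrapped, while an interpolated message still arrives as an expression and is stringified lazily. A small illustrative use:

```julia
x = 1
@assert x == 1 "x must equal one"   # passes; the literal string is used as-is
# @assert x == 2 "x was $x"         # would throw AssertionError("x was 1")
```
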
diff --git a/base/inference.jl b/base/inference.jl
index bd0471afd5dd9..3d22339c54396 100644
--- a/base/inference.jl
+++ b/base/inference.jl
@@ -7,6 +7,8 @@ const MAX_TYPEUNION_LEN = 3
const MAX_TYPE_DEPTH = 7
immutable InferenceParams
+ world::UInt
+
# optimization
inlining::Bool
@@ -17,11 +19,15 @@ immutable InferenceParams
MAX_UNION_SPLITTING::Int
# reasonable defaults
- InferenceParams(;inlining::Bool=inlining_enabled(),
- tupletype_len::Int=15, tuple_depth::Int=4,
- tuple_splat::Int=16, union_splitting::Int=4) =
- new(inlining, tupletype_len,
+ function InferenceParams(world::UInt;
+ inlining::Bool = inlining_enabled(),
+ tupletype_len::Int = 15,
+ tuple_depth::Int = 4,
+ tuple_splat::Int = 16,
+ union_splitting::Int = 4)
+ return new(world, inlining, tupletype_len,
tuple_depth, tuple_splat, union_splitting)
+ end
end
const UNION_SPLIT_MISMATCH_ERROR = false
@@ -63,13 +69,15 @@ type InferenceState
mod::Module
currpc::LineNum
- params::InferenceParams
-
# info on the state of inference and the linfo
+ params::InferenceParams
linfo::MethodInstance # used here for the tuple (specTypes, env, Method)
src::CodeInfo
+ min_valid::UInt
+ max_valid::UInt
nargs::Int
stmt_types::Vector{Any}
+ stmt_edges::Vector{Any}
# return type
bestguess #::Type
# current active instruction pointers
@@ -86,11 +94,13 @@ type InferenceState
# call-graph edges connecting from a caller to a callee (and back)
# we shouldn't need to iterate edges very often, so we use it to optimize the lookup from edge -> linenum
# whereas backedges is optimized for iteration
- edges::ObjectIdDict #Dict{InferenceState, Vector{LineNum}}
+ edges::ObjectIdDict # a Dict{InferenceState, Vector{LineNum}}
backedges::Vector{Tuple{InferenceState, Vector{LineNum}}}
# iteration fixed-point detection
fixedpoint::Bool
inworkq::Bool
+ const_api::Bool
+ const_ret::Bool
# TODO: put these in InferenceParams (depends on proper multi-methodcache support)
optimize::Bool
@@ -117,9 +127,10 @@ type InferenceState
src.ssavaluetypes = Any[ NF for i = 1:(src.ssavaluetypes::Int) ]
n = length(code)
- s = Any[ () for i = 1:n ]
+ s_types = Any[ () for i = 1:n ]
# initial types
- s[1] = Any[ VarState(Bottom, true) for i = 1:nslots ]
+ s_types[1] = Any[ VarState(Bottom, true) for i = 1:nslots ]
+ s_edges = Any[ () for i = 1:n ]
atypes = linfo.specTypes
nargs = toplevel ? 0 : linfo.def.nargs
@@ -130,9 +141,9 @@ type InferenceState
if la > 1
atypes = Tuple{Any[Any for i = 1:(la - 1)]..., Tuple.parameters[1]}
end
- s[1][la] = VarState(Tuple, false)
+ s_types[1][la] = VarState(Tuple, false)
else
- s[1][la] = VarState(tuple_tfunc(limit_tuple_depth(params, tupletype_tail(atypes, la))), false)
+ s_types[1][la] = VarState(tuple_tfunc(limit_tuple_depth(params, tupletype_tail(atypes, la))), false)
end
la -= 1
end
@@ -164,10 +175,10 @@ type InferenceState
# replace singleton types with their equivalent Const object
atyp = Const(atyp.instance)
end
- s[1][i] = VarState(atyp, false)
+ s_types[1][i] = VarState(atyp, false)
end
for i = (laty + 1):la
- s[1][i] = VarState(lastatype, false)
+ s_types[1][i] = VarState(lastatype, false)
end
else
@assert la == 0 # wrong number of arguments
@@ -184,16 +195,29 @@ type InferenceState
W = IntSet()
push!(W, 1) #initial pc to visit
- inmodule = toplevel ? current_module() : linfo.def.module # toplevel thunks are inferred in the current module
+ if !toplevel
+ meth = linfo.def
+ inmodule = meth.module
+ else
+ inmodule = current_module() # toplevel thunks are inferred in the current module
+ end
+
+ if cached && !toplevel
+ min_valid = min_world(linfo.def)
+ max_valid = max_world(linfo.def)
+ else
+ min_valid = typemax(UInt)
+ max_valid = typemin(UInt)
+ end
frame = new(
- sp, nl, inmodule, 0,
- params,
- linfo, src, nargs, s, Union{}, W, 1, n,
+ sp, nl, inmodule, 0, params,
+ linfo, src, min_valid, max_valid,
+ nargs, s_types, s_edges, Union{}, W, 1, n,
cur_hand, handler_at, n_handlers,
ssavalue_uses, ssavalue_init,
- ObjectIdDict(), #Dict{InferenceState, Vector{LineNum}}(),
+ ObjectIdDict(), # Dict{InferenceState, Vector{LineNum}}(),
Vector{Tuple{InferenceState, Vector{LineNum}}}(),
- false, false, optimize, cached, false)
+ false, false, false, false, optimize, cached, false)
push!(active, frame)
nactive[] += 1
return frame
@@ -703,7 +727,7 @@ function invoke_tfunc(f::ANY, types::ANY, argtype::ANY, sv::InferenceState)
ft = type_typeof(f)
types = Tuple{ft, types.parameters...}
argtype = Tuple{ft, argtype.parameters...}
- entry = ccall(:jl_gf_invoke_lookup, Any, (Any,), types)
+ entry = ccall(:jl_gf_invoke_lookup, Any, (Any, UInt), types, sv.params.world)
if entry === nothing
return Any
end
@@ -829,7 +853,7 @@ end
#### recursing into expression ####
-function abstract_call_gf_by_type(f::ANY, argtype::ANY, sv::InferenceState)
+function abstract_call_gf_by_type(f::ANY, atype::ANY, sv::InferenceState)
tm = _topmod(sv)
# don't consider more than N methods. this trades off between
# compiler performance and generated code performance.
@@ -838,27 +862,43 @@ function abstract_call_gf_by_type(f::ANY, argtype::ANY, sv::InferenceState)
# It is important for N to be >= the number of methods in the error()
# function, so we can still know that error() is always Bottom.
# here I picked 4.
- argtype = limit_tuple_type(argtype, sv.params)
+ argtype = limit_tuple_type(atype, sv.params)
argtypes = argtype.parameters
- applicable = _methods_by_ftype(argtype, 4)
+ ft = argtypes[1] # TODO: ccall jl_first_argument_datatype here
+ isa(ft, DataType) || return Any # the function being called is unknown. can't properly handle this backedge right now
+ isdefined(ft.name, :mt) || return Any # not callable. should be Bottom, but can't track this backedge right now
+ if ft.name === Type.name
+ tname = ft.parameters[1]
+ if isa(tname, TypeVar)
+ tname = tname.ub
+ end
+ if isa(tname, TypeConstructor)
+ tname = tname.body
+ end
+ if !isa(tname, DataType)
+ # can't track the backedge to the ctor right now
+ # for things like Union
+ return Any
+ end
+ end
+ min_valid = UInt[typemin(UInt)]
+ max_valid = UInt[typemax(UInt)]
+ applicable = _methods_by_ftype(argtype, 4, sv.params.world, min_valid, max_valid)
rettype = Bottom
if applicable === false
# this means too many methods matched
return Any
end
x::Array{Any,1} = applicable
- if isempty(x)
- # no methods match
- # TODO: it would be nice to return Bottom here, but during bootstrap we
- # often compile code that calls methods not defined yet, so it is much
- # safer just to fall back on dynamic dispatch.
- return Any
- end
+ fullmatch = false
for (m::SimpleVector) in x
sig = m[1]::DataType
method = m[3]::Method
sparams = m[2]::SimpleVector
recomputesvec = false
+ if !fullmatch && typeseq(sig, argtype)
+ fullmatch = true
+ end
# limit argument type tuple growth
lsig = length(m[3].sig.parameters)
@@ -969,7 +1009,16 @@ function abstract_call_gf_by_type(f::ANY, argtype::ANY, sv::InferenceState)
break
end
end
- # if rettype is Bottom we've found a method not found error
+ if !(fullmatch || rettype === Any)
+ # also need an edge to the method table in case something gets
+ # added that did not intersect with any existing method
+ add_mt_backedge(ft.name.mt, argtype, sv)
+ update_valid_age!(min_valid[1], max_valid[1], sv)
+ end
+ if isempty(x)
+ # TODO: this is needed because type intersection is wrong in some cases
+ return Any
+ end
#print("=> ", rettype, "\n")
return rettype
end
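
A rough sketch of why the method-table backedge added in this hunk matters: a caller compiled while no matching method existed must be invalidated once one is added, instead of keeping its stale "no method" result (this is the #265 scenario; the names below are illustrative):

```julia
g(x::String) = 0
caller(x) = g(x)
# caller(1) at this point would throw a MethodError: no g(::Int) exists yet
g(x::Int) = x + 1
caller(1)   # -> 2, because adding g(::Int) invalidates the compiled caller
```
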
@@ -1073,7 +1122,9 @@ function pure_eval_call(f::ANY, argtypes::ANY, atype::ANY, vtypes::VarTable, sv:
end
end
- meth = _methods_by_ftype(atype, 1)
+ min_valid = UInt[typemin(UInt)]
+ max_valid = UInt[typemax(UInt)]
+ meth = _methods_by_ftype(atype, 1, sv.params.world, min_valid, max_valid)
if meth === false || length(meth) != 1
return false
end
@@ -1086,7 +1137,9 @@ function pure_eval_call(f::ANY, argtypes::ANY, atype::ANY, vtypes::VarTable, sv:
args = Any[ (a=argtypes[i]; isa(a,Const) ? a.val : a.parameters[1]) for i in 2:length(argtypes) ]
try
- return abstract_eval_constant(f(args...))
+ value = Core._apply_pure(f, args)
+ # TODO: add some sort of edge(s)
+ return abstract_eval_constant(value)
catch
return false
end
@@ -1552,7 +1605,90 @@ end
inlining_enabled() = (JLOptions().can_inline == 1)
coverage_enabled() = (JLOptions().code_coverage != 0)
-function code_for_method(method::Method, atypes::ANY, sparams::SimpleVector, preexisting::Bool=false)
+# TODO: track the worlds for which this InferenceState
+# is being used, and split it if the WIP requires it?
+function converge_valid_age!(sv::InferenceState)
+ # push the validity range of sv into its fixedpoint callers
+ # recursing as needed to cover the graph
+ for (i, _) in sv.backedges
+ if i.fixedpoint
+ updated = false
+ if i.min_valid < sv.min_valid
+ i.min_valid = sv.min_valid
+ updated = true
+ end
+ if i.max_valid > sv.max_valid
+ i.max_valid = sv.max_valid
+ updated = true
+ end
+ @assert !isdefined(i.linfo, :def) || !i.cached || i.min_valid <= i.params.world <= i.max_valid "invalid age range update"
+ if updated
+ converge_valid_age!(i)
+ end
+ end
+ end
+ nothing
+end
+
+# work towards converging the valid age range for sv
+function update_valid_age!(min_valid::UInt, max_valid::UInt, sv::InferenceState)
+ sv.min_valid = max(sv.min_valid, min_valid)
+ sv.max_valid = min(sv.max_valid, max_valid)
+ @assert !isdefined(sv.linfo, :def) || !sv.cached || sv.min_valid <= sv.params.world <= sv.max_valid "invalid age range update"
+ nothing
+end
+update_valid_age!(edge::InferenceState, sv::InferenceState) = update_valid_age!(edge.min_valid, edge.max_valid, sv)
+update_valid_age!(li::MethodInstance, sv::InferenceState) = update_valid_age!(min_world(li), max_world(li), sv)
+
+# temporarily accumulate our edges to later add as backedges in the callee
+function add_backedge(li::MethodInstance, caller::InferenceState)
+ isdefined(caller.linfo, :def) || return # don't add backedges to toplevel exprs
+ if caller.stmt_edges[caller.currpc] === ()
+ caller.stmt_edges[caller.currpc] = []
+ end
+ push!(caller.stmt_edges[caller.currpc], li)
+ update_valid_age!(li, caller)
+ nothing
+end
+
+# temporarily accumulate our no method errors to later add as backedges in the callee method table
+function add_mt_backedge(mt::MethodTable, typ::ANY, caller::InferenceState)
+ isdefined(caller.linfo, :def) || return # don't add backedges to toplevel exprs
+ if caller.stmt_edges[caller.currpc] === ()
+ caller.stmt_edges[caller.currpc] = []
+ end
+ push!(caller.stmt_edges[caller.currpc], mt)
+ push!(caller.stmt_edges[caller.currpc], typ)
+ nothing
+end
+
+# add the real backedges now
+function finalize_backedges(frame::InferenceState)
+ toplevel = !isdefined(frame.linfo, :def)
+ if !toplevel && frame.cached && frame.max_valid == typemax(UInt)
+ caller = frame.linfo
+ for edges in frame.stmt_edges
+ i = 1
+ while i <= length(edges)
+ to = edges[i]
+ if isa(to, MethodInstance)
+ ccall(:jl_method_instance_add_backedge, Void, (Any, Any), to, caller)
+ i += 1
+ else
+ typeassert(to, MethodTable)
+ typ = edges[i + 1]
+ ccall(:jl_method_table_add_backedge, Void, (Any, Any, Any), to, typ, caller)
+ i += 2
+ end
+ end
+ end
+ end
+end
+
+function code_for_method(method::Method, atypes::ANY, sparams::SimpleVector, world::UInt, preexisting::Bool=false)
+ if world < min_world(method) || world > max_world(method)
+ return nothing
+ end
if method.isstaged && !isleaftype(atypes)
# don't call staged functions on abstract types.
# (see issues #8504, #10230)
@@ -1564,33 +1700,49 @@ function code_for_method(method::Method, atypes::ANY, sparams::SimpleVector, pre
if method.specializations !== nothing
# check cached specializations
# for an existing result stored there
- return ccall(:jl_specializations_lookup, Any, (Any, Any), method, atypes)
+ return ccall(:jl_specializations_lookup, Any, (Any, Any, UInt), method, atypes, world)
end
return nothing
end
- return ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any), method, atypes, sparams)
+ return ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any, UInt), method, atypes, sparams, world)
+end
+
+function typeinf_active(linfo::MethodInstance)
+ for infstate in active
+ infstate === nothing && continue
+ infstate = infstate::InferenceState
+ if linfo === infstate.linfo
+ return infstate
+ end
+ end
+ return nothing
end
+function add_backedge(frame::InferenceState, caller::InferenceState, currpc::Int)
+ update_valid_age!(frame, caller)
+ if haskey(caller.edges, frame)
+ Ws = caller.edges[frame]::Vector{Int}
+ if !(currpc in Ws)
+ push!(Ws, currpc)
+ end
+ else
+ Ws = Int[currpc]
+ caller.edges[frame] = Ws
+ push!(frame.backedges, (caller, Ws))
+ end
+end
# build (and start inferring) the inference frame for the linfo
function typeinf_frame(linfo::MethodInstance, caller, optimize::Bool, cached::Bool,
params::InferenceParams)
- frame = nothing
- if linfo.inInference
+ # println(params.world, ' ', linfo)
+ if cached && linfo.inInference
# inference on this signature may be in progress,
# find the corresponding frame in the active list
- for infstate in active
- infstate === nothing && continue
- infstate = infstate::InferenceState
- if linfo === infstate.linfo
- frame = infstate
- break
- end
- end
+ frame = typeinf_active(linfo)
# TODO: this assertion seems iffy
assert(frame !== nothing)
else
- # TODO: verify again here that linfo wasn't just inferred
# inference not started yet, make a new frame for a new lambda
if linfo.def.isstaged
try
@@ -1602,25 +1754,16 @@ function typeinf_frame(linfo::MethodInstance, caller, optimize::Bool, cached::Bo
else
src = get_source(linfo)
end
- linfo.inInference = true
+ cached && (linfo.inInference = true)
frame = InferenceState(linfo, src, optimize, cached, params)
end
frame = frame::InferenceState
- if isa(caller, InferenceState) && !caller.inferred
+ if isa(caller, InferenceState)
# if we were called from inside inference, the caller will be the InferenceState object
# for which the edge was required
- if haskey(caller.edges, frame)
- Ws = caller.edges[frame]::Vector{Int}
- if !(caller.currpc in Ws)
- push!(Ws, caller.currpc)
- end
- else
- @assert caller.currpc > 0
- Ws = Int[caller.currpc]
- caller.edges[frame] = Ws
- push!(frame.backedges, (caller, Ws))
- end
+ @assert caller.currpc > 0
+ add_backedge(frame, caller, caller.currpc)
end
typeinf_loop(frame)
return frame
@@ -1628,7 +1771,7 @@ end
# compute (and cache) an inferred AST and return the current best estimate of the result type
function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, caller::InferenceState)
- code = code_for_method(method, atypes, sparams)
+ code = code_for_method(method, atypes, sparams, caller.params.world)
code === nothing && return Any
code = code::MethodInstance
if isdefined(code, :inferred)
@@ -1637,6 +1780,7 @@ function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, caller
# so need to check whether the code itself is also inferred
inf = code.inferred
if !isa(inf, CodeInfo) || (inf::CodeInfo).inferred
+ add_backedge(code, caller)
if isdefined(code, :inferred_const)
return abstract_eval_constant(code.inferred_const)
else
@@ -1655,57 +1799,59 @@ end
# compute an inferred AST and return type
function typeinf_code(method::Method, atypes::ANY, sparams::SimpleVector,
optimize::Bool, cached::Bool, params::InferenceParams)
- code = code_for_method(method, atypes, sparams)
+ code = code_for_method(method, atypes, sparams, params.world)
code === nothing && return (nothing, Any)
return typeinf_code(code::MethodInstance, optimize, cached, params)
end
function typeinf_code(linfo::MethodInstance, optimize::Bool, cached::Bool,
params::InferenceParams)
for i = 1:2 # test-and-lock-and-test
+ i == 2 && ccall(:jl_typeinf_begin, Void, ())
if cached && isdefined(linfo, :inferred)
# see if this code already exists in the cache
# staged functions make this hard since they have two "inferred" conditions,
# so need to check whether the code itself is also inferred
inf = linfo.inferred
- if linfo.jlcall_api == 2
- method = linfo.def
- tree = ccall(:jl_new_code_info_uninit, Ref{CodeInfo}, ())
- tree.code = Any[ Expr(:return, QuoteNode(inf)) ]
- tree.slotnames = Any[ compiler_temp_sym for i = 1:method.nargs ]
- tree.slotflags = UInt8[ 0 for i = 1:method.nargs ]
- tree.slottypes = nothing
- tree.ssavaluetypes = 0
- tree.inferred = true
- tree.pure = true
- tree.inlineable = true
- i == 2 && ccall(:jl_typeinf_end, Void, ())
- return (tree, linfo.rettype)
- elseif isa(inf, CodeInfo)
- if (inf::CodeInfo).inferred
+ if min_world(linfo) <= params.world <= max_world(linfo)
+ if linfo.jlcall_api == 2
+ method = linfo.def
+ tree = ccall(:jl_new_code_info_uninit, Ref{CodeInfo}, ())
+ tree.code = Any[ Expr(:return, QuoteNode(inf)) ]
+ tree.slotnames = Any[ compiler_temp_sym for i = 1:method.nargs ]
+ tree.slotflags = UInt8[ 0 for i = 1:method.nargs ]
+ tree.slottypes = nothing
+ tree.ssavaluetypes = 0
+ tree.inferred = true
+ tree.pure = true
+ tree.inlineable = true
i == 2 && ccall(:jl_typeinf_end, Void, ())
- return (inf, linfo.rettype)
+ return svec(linfo, tree, linfo.rettype)
+ elseif isa(inf, CodeInfo)
+ if (inf::CodeInfo).inferred
+ i == 2 && ccall(:jl_typeinf_end, Void, ())
+ return svec(linfo, inf, linfo.rettype)
+ end
end
- else
- cached = false # don't need to save the new result
end
end
- i == 1 && ccall(:jl_typeinf_begin, Void, ())
end
frame = typeinf_frame(linfo, nothing, optimize, cached, params)
ccall(:jl_typeinf_end, Void, ())
- frame === nothing && return (nothing, Any)
+ frame === nothing && return svec(nothing, nothing, Any)
frame = frame::InferenceState
- frame.inferred || return (nothing, Any)
- return (frame.src, widenconst(frame.bestguess))
+ frame.inferred || return svec(nothing, nothing, Any)
+ frame.cached || return svec(nothing, frame.src, widenconst(frame.bestguess))
+ return svec(frame.linfo, frame.src, widenconst(frame.bestguess))
end
# compute (and cache) an inferred AST and return the inferred return type
function typeinf_type(method::Method, atypes::ANY, sparams::SimpleVector,
cached::Bool, params::InferenceParams)
- code = code_for_method(method, atypes, sparams)
+ code = code_for_method(method, atypes, sparams, params.world)
code === nothing && return nothing
code = code::MethodInstance
for i = 1:2 # test-and-lock-and-test
+ i == 2 && ccall(:jl_typeinf_begin, Void, ())
if cached && isdefined(code, :inferred)
# see if this rettype already exists in the cache
# staged functions make this hard since they have two "inferred" conditions,
@@ -1716,7 +1862,6 @@ function typeinf_type(method::Method, atypes::ANY, sparams::SimpleVector,
return code.rettype
end
end
- i == 1 && ccall(:jl_typeinf_begin, Void, ())
end
frame = typeinf_frame(code, nothing, cached, cached, params)
ccall(:jl_typeinf_end, Void, ())
@@ -1726,21 +1871,21 @@ function typeinf_type(method::Method, atypes::ANY, sparams::SimpleVector,
return widenconst(frame.bestguess)
end
-function typeinf_ext(linfo::MethodInstance)
+function typeinf_ext(linfo::MethodInstance, world::UInt)
if isdefined(linfo, :def)
# method lambda - infer this specialization via the method cache
- (code, typ) = typeinf_code(linfo, true, true, InferenceParams())
- return code
+ return typeinf_code(linfo, true, true, InferenceParams(world))
else
# toplevel lambda - infer directly
linfo.inInference = true
ccall(:jl_typeinf_begin, Void, ())
frame = InferenceState(linfo, linfo.inferred::CodeInfo,
- true, true, InferenceParams())
+ true, true, InferenceParams(world))
typeinf_loop(frame)
ccall(:jl_typeinf_end, Void, ())
@assert frame.inferred # TODO: deal with this better
- return frame.src
+ @assert frame.linfo === linfo
+ return svec(linfo, frame.src, linfo.rettype)
end
end
@@ -1781,7 +1926,21 @@ function typeinf_loop(frame)
end
end
for i in length(fplist):-1:1
- finish(fplist[i]) # this may add incomplete work to active
+ # optimize and record the results
+ # the reverse order makes it more likely to inline a callee into its caller
+ optimize(fplist[i]::InferenceState) # this may add incomplete work to active
+ end
+ for i in fplist
+ # push valid ages from each node across the graph cycle
+ converge_valid_age!(i::InferenceState)
+ end
+ for i in fplist
+ # record the results
+ finish(i::InferenceState)
+ end
+ for i in fplist
+ # update and record all of the back edges for the finished world
+ finalize_backedges(i::InferenceState)
end
end
end
@@ -1830,6 +1989,7 @@ function typeinf_frame(frame)
delete!(W, pc)
frame.currpc = pc
frame.cur_hand = frame.handler_at[pc]
+ frame.stmt_edges[pc] === () || empty!(frame.stmt_edges[pc])
stmt = frame.src.code[pc]
changes = abstract_interpret(stmt, s[pc]::Array{Any,1}, frame)
if changes === ()
@@ -1969,13 +2129,18 @@ function typeinf_frame(frame)
if finished || frame.fixedpoint
if finished
+ optimize(frame)
finish(frame)
+ finalize_backedges(frame)
else # fixedpoint propagation
- for (i,_) in frame.edges
+ for (i, _) in frame.edges
i = i::InferenceState
if !i.fixedpoint
- i.inworkq || push!(workq, i)
- i.inworkq = true
+ update_valid_age!(i, frame) # work towards converging age at the same time
+ if !i.inworkq
+ push!(workq, i)
+ i.inworkq = true
+ end
i.fixedpoint = true
end
end
@@ -1992,7 +2157,7 @@ function unmark_fixedpoint(frame::InferenceState)
# based upon (recursively) assuming that frame was stuck
if frame.fixedpoint
frame.fixedpoint = false
- for (i,_) in frame.backedges
+ for (i, _) in frame.backedges
unmark_fixedpoint(i)
end
end
@@ -2024,10 +2189,11 @@ function isinlineable(m::Method, src::CodeInfo)
end
# inference completed on `me`
-# update the MethodInstance and notify the edges
-function finish(me::InferenceState)
- for (i,_) in me.edges
- @assert (i::InferenceState).fixedpoint
+# now converge the optimization work
+function optimize(me::InferenceState)
+ for (i, _) in me.edges
+ i = i::InferenceState
+ @assert i.fixedpoint
end
# below may call back into inference and
# see this InferenceState is in an incomplete state
@@ -2044,9 +2210,8 @@ function finish(me::InferenceState)
end
type_annotate!(me)
- do_coverage = coverage_enabled()
- force_noinline = false
# run optimization passes on fulltree
+ force_noinline = false
if me.optimize
# This pass is required for the AST to be valid in codegen
# if any `SSAValue` is created by type inference. Ref issue #6068
@@ -2060,6 +2225,7 @@ function finish(me::InferenceState)
getfield_elim_pass!(me)
# Clean up for `alloc_elim_pass!` and `getfield_elim_pass!`
void_use_elim_pass!(me)
+ do_coverage = coverage_enabled()
meta_elim_pass!(me.src.code::Array{Any,1}, me.src.propagate_inbounds, do_coverage)
# Pop metadata before label reindexing
force_noinline = popmeta!(me.src.code::Array{Any,1}, :noinline)[1]
@@ -2067,94 +2233,135 @@ function finish(me::InferenceState)
end
widen_all_consts!(me.src)
- if isa(me.bestguess, Const)
- bg = me.bestguess::Const
- const_ret = true
- inferred_const = bg.val
- elseif isconstType(me.bestguess, true)
- const_ret = true
- inferred_const = me.bestguess.parameters[1]
- else
- const_ret = false
- inferred_const = nothing
- end
-
- const_api = false
- ispure = me.src.pure
- inferred = me.src
- if const_ret && inferred_const !== nothing
+ if isa(me.bestguess, Const) || isconstType(me.bestguess, true)
+ me.const_ret = true
+ ispure = me.src.pure
if !ispure && length(me.src.code) < 10
ispure = true
for stmt in me.src.code
if !statement_effect_free(stmt, me.src, me.mod)
- ispure = false; break
+ ispure = false
+ break
end
end
if ispure
for fl in me.src.slotflags
if (fl & Slot_UsedUndef) != 0
- ispure = false; break
+ ispure = false
+ break
end
end
end
end
+ me.src.pure = ispure
+
+ do_coverage = coverage_enabled()
if ispure && !do_coverage
# use constant calling convention
- inferred = inferred_const
# Do not emit `jlcall_api == 2` if coverage is enabled
# so that we don't need to add coverage support
# to the `jl_call_method_internal` fast path
# Still set pure flag to make sure `inference` tests pass
# and to possibly enable more optimization in the future
- const_api = true
+ me.const_api = true
+ force_noinline || (me.src.inlineable = true)
end
- me.src.pure = ispure
end
# determine and cache inlineability
if !me.src.inlineable && !force_noinline && isdefined(me.linfo, :def)
- me.src.inlineable = const_api || isinlineable(me.linfo.def, me.src)
+ me.src.inlineable = isinlineable(me.linfo.def, me.src)
end
+ me.src.inferred = true
+ nothing
+end
+# inference completed on `me`
+# update the MethodInstance and notify the edges
+function finish(me::InferenceState)
+ me.currpc = 1 # used by add_backedge
if me.cached
toplevel = !isdefined(me.linfo, :def)
if !toplevel
- if !const_api
- keeptree = me.src.inlineable || ccall(:jl_is_cacheable_sig, Int32, (Any, Any, Any),
- me.linfo.specTypes, me.linfo.def.sig, me.linfo.def) != 0
- if !keeptree
- inferred = nothing
+ min_valid = me.min_valid
+ max_valid = me.max_valid
+ else
+ min_valid = UInt(0)
+ max_valid = UInt(0)
+ end
+
+ # check if the existing me.linfo metadata is also sufficient to describe the current inference result
+ # to decide if it is worth caching it again (which would also clear any generated code)
+ already_inferred = false
+ if isdefined(me.linfo, :inferred)
+ inf = me.linfo.inferred
+ if !isa(inf, CodeInfo) || (inf::CodeInfo).inferred
+ if min_world(me.linfo) == min_valid && max_world(me.linfo) == max_valid
+ already_inferred = true
+ end
+ end
+ end
+
+ if !already_inferred
+ const_flags = (me.const_ret) << 1 | me.const_api
+ if me.const_ret
+ if isa(me.bestguess, Const)
+ inferred_const = (me.bestguess::Const).val
else
- # compress code for non-toplevel thunks
- inferred.code = ccall(:jl_compress_ast, Any, (Any, Any), me.linfo.def, inferred.code)
+ @assert isconstType(me.bestguess, true)
+ inferred_const = me.bestguess.parameters[1]
+ end
+ else
+ inferred_const = nothing
+ end
+ if me.const_api
+ # use constant calling convention
+ inferred_result = inferred_const
+ else
+ inferred_result = me.src
+ end
+
+ if !toplevel
+ if !me.const_api
+ keeptree = me.src.inlineable || ccall(:jl_is_cacheable_sig, Int32, (Any, Any, Any),
+ me.linfo.specTypes, me.linfo.def.sig, me.linfo.def) != 0
+ if !keeptree
+ inferred_result = nothing
+ else
+ # compress code for non-toplevel thunks
+ inferred_result.code = ccall(:jl_compress_ast, Any, (Any, Any), me.linfo.def, inferred_result.code)
+ end
end
end
+ cache = ccall(:jl_set_method_inferred, Ref{MethodInstance}, (Any, Any, Any, Any, Int32, UInt, UInt),
+ me.linfo, widenconst(me.bestguess), inferred_const, inferred_result,
+ const_flags, min_valid, max_valid)
+ if cache !== me.linfo
+ me.linfo.inInference = false
+ me.linfo = cache
+ end
end
- const_flags = (const_ret) << 1 | const_api
- ccall(:jl_set_lambda_rettype, Void, (Any, Any, Int32, Any, Any),
- me.linfo, widenconst(me.bestguess), const_flags,
- inferred_const, inferred)
end
- me.src.inferred = true
- me.linfo.inInference = false
- # finalize and record the linfo result
- me.inferred = true
-
# lazy-delete the item from active for several reasons:
# efficiency, correctness, and recursion-safety
nactive[] -= 1
active[findlast(active, me)] = nothing
# update all of the callers by traversing the backedges
- for (i,_) in me.backedges
+ for (i, _) in me.backedges
if !me.fixedpoint || !i.fixedpoint
# wake up each backedge, unless both me and it already reached a fixed-point (cycle resolution stage)
delete!(i.edges, me)
i.inworkq || push!(workq, i)
i.inworkq = true
end
+ add_backedge(me.linfo, i)
end
+
+ # finalize and record the linfo result
+ me.cached && (me.linfo.inInference = false)
+ me.inferred = true
nothing
end
@@ -2579,8 +2786,9 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
function splitunion(atypes::Vector{Any}, i::Int)
if i == 0
local sig = argtypes_to_type(atypes)
- local li = ccall(:jl_get_spec_lambda, Any, (Any,), sig)
+ local li = ccall(:jl_get_spec_lambda, Any, (Any, UInt), sig, sv.params.world)
li === nothing && return false
+ add_backedge(li, sv)
local stmt = []
push!(stmt, Expr(:(=), linfo_var, li))
spec_hit === nothing && (spec_hit = genlabel(sv))
@@ -2623,7 +2831,7 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
append!(stmts, match)
if error_label !== nothing
push!(stmts, error_label)
- push!(stmts, Expr(:call, GlobalRef(_topmod(sv.mod), :error), "error in type inference due to #265"))
+ push!(stmts, Expr(:call, GlobalRef(_topmod(sv.mod), :error), "fatal error in type inference (type bound)"))
end
local ret_var, merge
if spec_miss !== nothing
@@ -2646,8 +2854,9 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
return (ret_var, stmts)
end
else
- local cache_linfo = ccall(:jl_get_spec_lambda, Any, (Any,), atype_unlimited)
+ local cache_linfo = ccall(:jl_get_spec_lambda, Any, (Any, UInt), atype_unlimited, sv.params.world)
cache_linfo === nothing && return NF
+ add_backedge(cache_linfo, sv)
e.head = :invoke
unshift!(e.args, cache_linfo)
return e
@@ -2663,7 +2872,9 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
else
atype = atype_unlimited
end
- meth = _methods_by_ftype(atype, 1)
+ min_valid = UInt[typemin(UInt)]
+ max_valid = UInt[typemax(UInt)]
+ meth = _methods_by_ftype(atype, 1, sv.params.world, min_valid, max_valid)
if meth === false || length(meth) != 1
return invoke_NF()
end
@@ -2728,11 +2939,12 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
end
# see if the method has been previously inferred (and cached)
- linfo = code_for_method(method, metharg, methsp, !force_infer) # Union{Void, MethodInstance}
+ linfo = code_for_method(method, metharg, methsp, sv.params.world, !force_infer) # Union{Void, MethodInstance}
isa(linfo, MethodInstance) || return invoke_NF()
linfo = linfo::MethodInstance
if linfo.jlcall_api == 2
# in this case function can be inlined to a constant
+ add_backedge(linfo, sv)
return inline_as_constant(linfo.inferred, argexprs, sv)
end
@@ -2753,9 +2965,12 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
frame.stmt_types[1][3] = VarState(atypes[3], false)
typeinf_loop(frame)
else
- if isdefined(linfo, :inferred)
+ if isdefined(linfo, :inferred) && isa(linfo.inferred, CodeInfo) && (linfo.inferred::CodeInfo).inferred
# use cache
src = linfo.inferred
+ elseif linfo.inInference
+ # use WIP
+ frame = typeinf_active(linfo)
elseif force_infer
# create inferred code on-demand
# but if we decided in the past not to try to infer this particular signature
@@ -2768,12 +2983,21 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
# compute the return value
if isa(frame, InferenceState)
frame = frame::InferenceState
- frame.inferred || return invoke_NF()
linfo = frame.linfo
src = frame.src
- if linfo.jlcall_api == 2
- # in this case function can be inlined to a constant
- return inline_as_constant(linfo.inferred, argexprs, sv)
+ if frame.const_api # handle like jlcall_api == 2
+ if frame.inferred || !frame.cached
+ add_backedge(frame.linfo, sv)
+ else
+ add_backedge(frame, sv, 0)
+ end
+ if isa(frame.bestguess, Const)
+ inferred_const = (frame.bestguess::Const).val
+ else
+ @assert isconstType(frame.bestguess, true)
+ inferred_const = frame.bestguess.parameters[1]
+ end
+ return inline_as_constant(inferred_const, argexprs, sv)
end
rettype = widenconst(frame.bestguess)
else
@@ -2788,6 +3012,15 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
return invoke_NF()
end
+ # create the backedge
+ if isa(frame, InferenceState) && !frame.inferred && frame.cached
+ # in this case, the actual backedge linfo hasn't been computed
+ # yet, but will be when inference on the frame finishes
+ add_backedge(frame, sv, 0)
+ else
+ add_backedge(linfo, sv)
+ end
+
spvals = Any[]
for i = 1:length(methsp)
push!(spvals, methsp[i])
@@ -2906,7 +3139,7 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
lastexpr = pop!(body.args)
if isa(lastexpr,LabelNode)
push!(body.args, lastexpr)
- push!(body.args, Expr(:call, GlobalRef(_topmod(sv.mod),:error), "fatal error in type inference"))
+ push!(body.args, Expr(:call, GlobalRef(_topmod(sv.mod), :error), "fatal error in type inference (lowering)"))
lastexpr = nothing
elseif !(isa(lastexpr,Expr) && lastexpr.head === :return)
# code sometimes ends with a meta node, e.g. inbounds pop
@@ -2947,13 +3180,14 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
end
end
- do_coverage = coverage_enabled()
inlining_ignore = function (stmt::ANY)
isa(stmt, Expr) && return is_meta_expr(stmt::Expr)
isa(stmt, LineNumberNode) && return true
stmt === nothing && return true
return false
end
+
+ do_coverage = coverage_enabled()
if do_coverage
line = method.line
if !isempty(stmts) && isa(stmts[1], LineNumberNode)
@@ -4089,11 +4323,10 @@ function reindex_labels!(sv::InferenceState)
end
end
-function return_type(f::ANY, t::ANY, params::InferenceParams=InferenceParams())
- # NOTE: if not processed by pure_eval_call during inference, a call to return_type
- # might use difference InferenceParams than the method it is contained in...
+function return_type(f::ANY, t::ANY)
+ params = InferenceParams(ccall(:jl_get_tls_world_age, UInt, ()))
rt = Union{}
- for m in _methods(f, t, -1)
+ for m in _methods(f, t, -1, params.world)
ty = typeinf_type(m[3], m[1], m[2], true, params)
ty === nothing && return Any
rt = tmerge(rt, ty)
@@ -4120,7 +4353,7 @@ let fs = Any[typeinf_ext, typeinf_loop, typeinf_edge, occurs_outside_getfield, e
end
end
for f in fs
- for m in _methods_by_ftype(Tuple{typeof(f), Vararg{Any}}, 10)
+ for m in _methods_by_ftype(Tuple{typeof(f), Vararg{Any}}, 10, typemax(UInt))
# remove any TypeVars from the intersection
typ = Any[m[1].parameters...]
for i = 1:length(typ)
@@ -4128,7 +4361,7 @@ let fs = Any[typeinf_ext, typeinf_loop, typeinf_edge, occurs_outside_getfield, e
typ[i] = typ[i].ub
end
end
- typeinf_type(m[3], Tuple{typ...}, m[2], true, InferenceParams())
+ typeinf_type(m[3], Tuple{typ...}, m[2], true, InferenceParams(typemax(UInt)))
end
end
end
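
As the last hunks show, `return_type` no longer takes an `InferenceParams` argument; it reads the caller's task-local world age via `jl_get_tls_world_age` instead. A hedged usage sketch of this internal helper:

```julia
Core.Inference.return_type(+, Tuple{Int, Int})   # -> Int64 on a 64-bit build
```
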
diff --git a/base/methodshow.jl b/base/methodshow.jl
index f00d8aa354760..2be02de975c56 100644
--- a/base/methodshow.jl
+++ b/base/methodshow.jl
@@ -58,9 +58,9 @@ function arg_decl_parts(m::Method)
return tv, decls, file, line
end
-function kwarg_decl(sig::ANY, kwtype::DataType)
- sig = Tuple{kwtype, Core.AnyVector, sig.parameters...}
- kwli = ccall(:jl_methtable_lookup, Any, (Any, Any), kwtype.name.mt, sig)
+function kwarg_decl(m::Method, kwtype::DataType)
+ sig = Tuple{kwtype, Core.AnyVector, m.sig.parameters...}
+ kwli = ccall(:jl_methtable_lookup, Any, (Any, Any, UInt), kwtype.name.mt, sig, max_world(m))
if kwli !== nothing
kwli = kwli::Method
src = kwli.isstaged ? kwli.unspecialized.inferred : kwli.source
@@ -104,7 +104,7 @@ function show(io::IO, m::Method; kwtype::Nullable{DataType}=Nullable{DataType}()
join(io, [isempty(d[2]) ? d[1] : d[1]*"::"*d[2] for d in decls[2:end]],
", ", ", ")
if !isnull(kwtype)
- kwargs = kwarg_decl(m.sig, get(kwtype))
+ kwargs = kwarg_decl(m, get(kwtype))
if !isempty(kwargs)
print(io, "; ")
join(io, kwargs, ", ", ", ")
@@ -227,7 +227,7 @@ function show(io::IO, ::MIME"text/html", m::Method; kwtype::Nullable{DataType}=N
join(io, [isempty(d[2]) ? d[1] : d[1]*"::"*d[2]*""
for d in decls[2:end]], ", ", ", ")
if !isnull(kwtype)
- kwargs = kwarg_decl(m.sig, get(kwtype))
+ kwargs = kwarg_decl(m, get(kwtype))
if !isempty(kwargs)
print(io, "; ")
join(io, kwargs, ", ", ", ")
diff --git a/base/multi.jl b/base/multi.jl
index a7d112df5608f..62063e50dfa36 100644
--- a/base/multi.jl
+++ b/base/multi.jl
@@ -124,10 +124,10 @@ for (idx, tname) in enumerate(msgtypes)
end
end
-function deserialize_msg(s)
+function deserialize_msg(s::AbstractSerializer)
idx = read(s.io, UInt8)
t = msgtypes[idx]
- return deserialize_msg(s, t)
+ return eval(current_module(), Expr(:body, Expr(:return, Expr(:call, deserialize_msg, QuoteNode(s), QuoteNode(t)))))
end
function send_msg_unknown(s::IO, header, msg)
@@ -321,7 +321,7 @@ function send_msg_(w::Worker, header, msg, now::Bool)
try
reset_state(w.w_serializer)
serialize_hdr_raw(io, header)
- serialize(w.w_serializer, msg) # io is wrapped in w_serializer
+ eval(current_module(), Expr(:body, Expr(:return, Expr(:call, serialize, QuoteNode(w.w_serializer), QuoteNode(msg))))) # io is wrapped in w_serializer
write(io, MSG_BOUNDARY)
if !now && w.gcflag
@@ -812,7 +812,7 @@ function lookup_ref(pg, rrid, f)
rv = get(pg.refs, rrid, false)
if rv === false
# first we've heard of this ref
- rv = RemoteValue(f())
+ rv = RemoteValue(eval(Main, Expr(:body, Expr(:return, Expr(:call, f)))))
pg.refs[rrid] = rv
push!(rv.clientset, rrid.whence)
end
diff --git a/base/precompile.jl b/base/precompile.jl
index ee70a0744a4e0..c8ce400142c17 100644
--- a/base/precompile.jl
+++ b/base/precompile.jl
@@ -454,6 +454,9 @@ precompile(Base.string, (String, String, Char))
precompile(Base.string, (String, String, Int))
precompile(Base.vect, (Base.LineEdit.Prompt, String))
+# Speed up type inference in the post-Base world redefinition of convert
+isdefined(Core, :Inference) && Base.code_typed(Base.code_typed)
+
# Speeding up addprocs for LocalManager
precompile(Base.start_worker, ())
precompile(Base.start_worker, (Base.TTY,))
diff --git a/base/reflection.jl b/base/reflection.jl
index 6dda825683c6a..ebcbea5e686bf 100644
--- a/base/reflection.jl
+++ b/base/reflection.jl
@@ -342,7 +342,7 @@ tt_cons(t::ANY, tup::ANY) = (@_pure_meta; Tuple{t, (isa(tup, Type) ? tup.paramet
Returns an array of lowered ASTs for the methods matching the given generic function and type signature.
"""
-function code_lowered(f, t::ANY=Tuple)
+function code_lowered(f::ANY, t::ANY=Tuple)
asts = map(methods(f, t)) do m
m = m::Method
return uncompressed_ast(m, m.isstaged ? m.unspecialized.inferred : m.source)
@@ -352,13 +352,16 @@ end
# low-level method lookup functions used by the compiler
-function _methods(f::ANY,t::ANY,lim)
+function _methods(f::ANY, t::ANY, lim::Int, world::UInt)
ft = isa(f,Type) ? Type{f} : typeof(f)
tt = isa(t,Type) ? Tuple{ft, t.parameters...} : Tuple{ft, t...}
- return _methods_by_ftype(tt, lim)
+ return _methods_by_ftype(tt, lim, world)
end
-function _methods_by_ftype(t::ANY, lim)
+function _methods_by_ftype(t::ANY, lim::Int, world::UInt)
+ return _methods_by_ftype(t, lim, world, UInt[typemin(UInt)], UInt[typemax(UInt)])
+end
+function _methods_by_ftype(t::ANY, lim::Int, world::UInt, min::Array{UInt,1}, max::Array{UInt,1})
tp = t.parameters::SimpleVector
nu = 1
for ti in tp
@@ -367,15 +370,16 @@ function _methods_by_ftype(t::ANY, lim)
end
end
if 1 < nu <= 64
- return _methods(Any[tp...], length(tp), lim, [])
+ return _methods_by_ftype(Any[tp...], length(tp), lim, [], world, min, max)
end
# XXX: the following can return incorrect answers that the above branch would have corrected
- return ccall(:jl_matching_methods, Any, (Any,Cint,Cint), t, lim, 0)
+ return ccall(:jl_matching_methods, Any, (Any, Cint, Cint, UInt, Ptr{UInt}, Ptr{UInt}), t, lim, 0, world, min, max)
end
-function _methods(t::Array,i,lim::Integer,matching::Array{Any,1})
+function _methods_by_ftype(t::Array, i, lim::Integer, matching::Array{Any,1}, world::UInt, min::Array{UInt,1}, max::Array{UInt,1})
if i == 0
- new = ccall(:jl_matching_methods, Any, (Any,Cint,Cint), Tuple{t...}, lim, 0)
+ world = typemax(UInt)
+ new = ccall(:jl_matching_methods, Any, (Any, Cint, Cint, UInt, Ptr{UInt}, Ptr{UInt}), Tuple{t...}, lim, 0, world, min, max)
new === false && return false
append!(matching, new::Array{Any,1})
else
@@ -383,14 +387,14 @@ function _methods(t::Array,i,lim::Integer,matching::Array{Any,1})
if isa(ti, Union)
for ty in (ti::Union).types
t[i] = ty
- if _methods(t,i-1,lim,matching) === false
+ if _methods_by_ftype(t, i - 1, lim, matching, world, min, max) === false
t[i] = ti
return false
end
end
t[i] = ti
else
- return _methods(t,i-1,lim,matching)
+ return _methods_by_ftype(t, i - 1, lim, matching, world, min, max)
end
end
return matching
@@ -430,7 +434,8 @@ function methods(f::ANY, t::ANY)
throw(ArgumentError("argument is not a generic function"))
end
t = to_tuple_type(t)
- return MethodList(Method[m[3] for m in _methods(f,t,-1)], typeof(f).name.mt)
+ world = typemax(UInt)
+ return MethodList(Method[m[3] for m in _methods(f, t, -1, world)], typeof(f).name.mt)
end
methods(f::Core.Builtin) = MethodList(Method[], typeof(f).name.mt)
@@ -438,7 +443,10 @@ methods(f::Core.Builtin) = MethodList(Method[], typeof(f).name.mt)
function methods_including_ambiguous(f::ANY, t::ANY)
ft = isa(f,Type) ? Type{f} : typeof(f)
tt = isa(t,Type) ? Tuple{ft, t.parameters...} : Tuple{ft, t...}
- ms = ccall(:jl_matching_methods, Any, (Any,Cint,Cint), tt, -1, 1)::Array{Any,1}
+ world = typemax(UInt)
+ min = UInt[typemin(UInt)]
+ max = UInt[typemax(UInt)]
+ ms = ccall(:jl_matching_methods, Any, (Any, Cint, Cint, UInt, Ptr{UInt}, Ptr{UInt}), tt, -1, 1, world, min, max)::Array{Any,1}
return MethodList(Method[m[3] for m in ms], typeof(f).name.mt)
end
function methods(f::ANY)
@@ -523,6 +531,7 @@ function _dump_function(f::ANY, t::ANY, native::Bool, wrapper::Bool,
throw(ArgumentError("argument is not a generic function"))
end
# get the MethodInstance for the method match
+ world = typemax(UInt)
meth = which(f, t)
t = to_tuple_type(t)
ft = isa(f, Type) ? Type{f} : typeof(f)
@@ -530,21 +539,21 @@ function _dump_function(f::ANY, t::ANY, native::Bool, wrapper::Bool,
(ti, env) = ccall(:jl_match_method, Any, (Any, Any, Any),
tt, meth.sig, meth.tvars)::SimpleVector
meth = func_for_method_checked(meth, tt)
- linfo = ccall(:jl_specializations_get_linfo, Ref{Core.MethodInstance}, (Any, Any, Any), meth, tt, env)
+ linfo = ccall(:jl_specializations_get_linfo, Ref{Core.MethodInstance}, (Any, Any, Any, UInt), meth, tt, env, world)
# get the code for it
- return _dump_function(linfo, native, wrapper, strip_ir_metadata, dump_module, syntax, optimize, params)
+ return _dump_function_linfo(linfo, world, native, wrapper, strip_ir_metadata, dump_module, syntax, optimize, params)
end
-function _dump_function(linfo::Core.MethodInstance, native::Bool, wrapper::Bool,
- strip_ir_metadata::Bool, dump_module::Bool, syntax::Symbol=:att,
- optimize::Bool=true, params::CodegenParams=CodegenParams())
+function _dump_function_linfo(linfo::Core.MethodInstance, world::UInt, native::Bool, wrapper::Bool,
+ strip_ir_metadata::Bool, dump_module::Bool, syntax::Symbol=:att,
+ optimize::Bool=true, params::CodegenParams=CodegenParams())
if syntax != :att && syntax != :intel
throw(ArgumentError("'syntax' must be either :intel or :att"))
end
if native
- llvmf = ccall(:jl_get_llvmf_decl, Ptr{Void}, (Any, Bool, CodegenParams), linfo, wrapper, params)
+ llvmf = ccall(:jl_get_llvmf_decl, Ptr{Void}, (Any, UInt, Bool, CodegenParams), linfo, world, wrapper, params)
else
- llvmf = ccall(:jl_get_llvmf_defn, Ptr{Void}, (Any, Bool, Bool, CodegenParams), linfo, wrapper, optimize, params)
+ llvmf = ccall(:jl_get_llvmf_defn, Ptr{Void}, (Any, UInt, Bool, Bool, CodegenParams), linfo, world, wrapper, optimize, params)
end
if llvmf == C_NULL
error("could not compile the specified method")
@@ -612,10 +621,11 @@ function code_typed(f::ANY, types::ANY=Tuple; optimize=true)
end
types = to_tuple_type(types)
asts = []
- params = Core.Inference.InferenceParams()
- for x in _methods(f, types, -1)
+ world = typemax(UInt)
+ params = Core.Inference.InferenceParams(world)
+ for x in _methods(f, types, -1, world)
meth = func_for_method_checked(x[3], types)
- (code, ty) = Core.Inference.typeinf_code(meth, x[1], x[2], optimize, optimize, params)
+ (_, code, ty) = Core.Inference.typeinf_code(meth, x[1], x[2], optimize, optimize, params)
code === nothing && error("inference not successful") # Inference disabled?
push!(asts, uncompressed_ast(meth, code) => ty)
end
@@ -629,8 +639,9 @@ function return_types(f::ANY, types::ANY=Tuple)
end
types = to_tuple_type(types)
rt = []
- params = Core.Inference.InferenceParams()
- for x in _methods(f, types, -1)
+ world = typemax(UInt)
+ params = Core.Inference.InferenceParams(world)
+ for x in _methods(f, types, -1, world)
meth = func_for_method_checked(x[3], types)
ty = Core.Inference.typeinf_type(meth, x[1], x[2], true, params)
ty === nothing && error("inference not successful") # Inference disabled?
@@ -659,7 +670,7 @@ function which(f::ANY, t::ANY)
else
ft = isa(f,Type) ? Type{f} : typeof(f)
tt = Tuple{ft, t.parameters...}
- m = ccall(:jl_gf_invoke_lookup, Any, (Any,), tt)
+ m = ccall(:jl_gf_invoke_lookup, Any, (Any, UInt), tt, typemax(UInt))
if m === nothing
error("no method found for the specified argument types")
end
@@ -765,13 +776,14 @@ true
function method_exists(f::ANY, t::ANY)
t = to_tuple_type(t)
t = Tuple{isa(f,Type) ? Type{f} : typeof(f), t.parameters...}
- return ccall(:jl_method_exists, Cint, (Any, Any), typeof(f).name.mt, t) != 0
+ return ccall(:jl_method_exists, Cint, (Any, Any, UInt), typeof(f).name.mt, t,
+ typemax(UInt)) != 0
end
function isambiguous(m1::Method, m2::Method)
ti = typeintersect(m1.sig, m2.sig)
ti === Bottom && return false
- ml = _methods_by_ftype(ti, -1)
+ ml = _methods_by_ftype(ti, -1, typemax(UInt))
isempty(ml) && return true
for m in ml
if ti <: m[3].sig
@@ -780,3 +792,8 @@ function isambiguous(m1::Method, m2::Method)
end
return true
end
+
+min_world(m::Method) = reinterpret(UInt, m.min_world)
+max_world(m::Method) = reinterpret(UInt, m.max_world)
+min_world(m::Core.MethodInstance) = reinterpret(UInt, m.min_world)
+max_world(m::Core.MethodInstance) = reinterpret(UInt, m.max_world)
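
A short sketch using the new (internal) reflection helpers defined above; every `Method` and `MethodInstance` now carries the world range for which it is visible:

```julia
m = first(methods(sin, (Float64,)))
Base.min_world(m) <= ccall(:jl_get_world_counter, UInt, ())   # -> true
Base.max_world(m) == typemax(UInt)   # -> true as long as the method has not been replaced
```
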
diff --git a/base/replutil.jl b/base/replutil.jl
index 0a5ddaed2d24a..e4061ae080c5e 100644
--- a/base/replutil.jl
+++ b/base/replutil.jl
@@ -335,9 +335,14 @@ function showerror(io::IO, ex::MethodError)
basef = getfield(Base, name)
if basef !== ex.f && method_exists(basef, arg_types)
println(io)
- print(io, "you may have intended to import Base.", name)
+ print(io, "You may have intended to import Base.", name)
end
end
+ if method_exists(ex.f, arg_types)
+ curworld = ccall(:jl_get_world_counter, UInt, ())
+ println(io)
+ print(io, "The applicable method may be too new: running in world age $(ex.world), while current world is $(curworld).")
+ end
if !is_arg_types
# Check for row vectors used where a column vector is intended.
vec_args = []
@@ -514,7 +519,7 @@ function show_method_candidates(io::IO, ex::MethodError, kwargs::Vector=Any[])
kwords = Symbol[]
if isdefined(ft.name.mt, :kwsorter)
kwsorter_t = typeof(ft.name.mt.kwsorter)
- kwords = kwarg_decl(method.sig, kwsorter_t)
+ kwords = kwarg_decl(method, kwsorter_t)
length(kwords) > 0 && print(buf, "; ", join(kwords, ", "))
end
print(buf, ")")
@@ -535,6 +540,13 @@ function show_method_candidates(io::IO, ex::MethodError, kwargs::Vector=Any[])
end
end
end
+ if ex.world < min_world(method)
+ print(buf, " (method too new to be called from this world context.)")
+ end
+ if ex.world > max_world(method)
+ print(buf, " (method deleted before this world age.)")
+ end
+ # TODO: indicate if it's in the wrong world
push!(lines, (buf, right_matches))
end
end
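
An illustration of when the new hint fires: calling a method that is newer than the running world raises a `MethodError` whose printout now explains the age mismatch (the function name below is made up for the example):

```julia
function demo()
    eval(:(brand_new() = 1))   # method added in a newer world
    return brand_new()         # dispatched in demo()'s older world -> MethodError
end
try
    demo()
catch err
    showerror(STDOUT, err)   # includes "The applicable method may be too new: ..."
end
```
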
diff --git a/base/serialize.jl b/base/serialize.jl
index d3b384cb5c059..fb662009658fd 100644
--- a/base/serialize.jl
+++ b/base/serialize.jl
@@ -416,7 +416,7 @@ function serialize_typename(s::AbstractSerializer, t::TypeName)
serialize(s, t.primary.ninitialized)
if isdefined(t, :mt)
serialize(s, t.mt.name)
- serialize(s, t.mt.defs)
+ serialize(s, collect(Base.MethodList(t.mt)))
serialize(s, t.mt.max_args)
if isdefined(t.mt, :kwsorter)
serialize(s, t.mt.kwsorter)
@@ -621,7 +621,7 @@ function deserialize(s::AbstractSerializer, ::Type{Method})
name = deserialize(s)::Symbol
file = deserialize(s)::Symbol
line = deserialize(s)::Int32
- sig = deserialize(s)
+ sig = deserialize(s)::DataType
tvars = deserialize(s)::Union{SimpleVector, TypeVar}
sparam_syms = deserialize(s)::SimpleVector
ambig = deserialize(s)::Union{Array{Any,1}, Void}
@@ -650,6 +650,10 @@ function deserialize(s::AbstractSerializer, ::Type{Method})
else
meth.source = template
end
+ ftype = ccall(:jl_first_argument_datatype, Any, (Any,), sig)::DataType
+ if isdefined(ftype.name, :mt) && nothing === ccall(:jl_methtable_lookup, Any, (Any, Any, UInt), ftype.name.mt, sig, typemax(UInt))
+ ccall(:jl_method_table_insert, Void, (Any, Any, Ptr{Void}), ftype.name.mt, meth, C_NULL)
+ end
known_object_data[lnumber] = meth
end
return meth
@@ -803,8 +807,12 @@ function deserialize_typename(s::AbstractSerializer, number)
if makenew
tn.mt = ccall(:jl_new_method_table, Any, (Any, Any), name, tn.module)
tn.mt.name = mtname
- tn.mt.defs = defs
tn.mt.max_args = maxa
+ for def in defs
+ if isdefined(def, :sig)
+ ccall(:jl_method_table_insert, Void, (Any, Any, Ptr{Void}), tn.mt, def, C_NULL)
+ end
+ end
end
tag = Int32(read(s.io, UInt8)::UInt8)
if tag != UNDEFREF_TAG
diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl
index da0a3be768008..e2da90d672359 100644
--- a/base/sparse/sparsematrix.jl
+++ b/base/sparse/sparsematrix.jl
@@ -2253,8 +2253,10 @@ for (Bsig, A1sig, A2sig, gbb, funcname) in
global $funcname
function $funcname(f::Function, B::$Bsig, A1::$A1sig, A2::$A2sig)
func = @get! cache f gen_broadcast_function_sparse($gbb, f, ($A1sig) <: SparseMatrixCSC)
- func(B, A1, A2)
- B
+ # need eval because func was just created by gen_broadcast_function_sparse
+ # TODO: convert this to a generated function
+ eval(current_module(), Expr(:body, Expr(:return, Expr(:call, QuoteNode(func), QuoteNode(B), QuoteNode(A1), QuoteNode(A2)))))
+ return B
end
end # let broadcast_cache
end
diff --git a/base/sysimg.jl b/base/sysimg.jl
index 915d9eff063ae..a25f2ac5895e9 100644
--- a/base/sysimg.jl
+++ b/base/sysimg.jl
@@ -19,8 +19,12 @@ INCLUDE_STATE = 1 # include = Core.include
include("coreio.jl")
-eval(x) = Core.eval(Base,x)
-eval(m,x) = Core.eval(m,x)
+eval(x) = Core.eval(Base, x)
+eval(m, x) = Core.eval(m, x)
+(::Type{T}){T}(arg) = convert(T, arg)::T
+(::Type{VecElement{T}}){T}(arg) = VecElement{T}(convert(T, arg))
+convert{T<:VecElement}(::Type{T}, arg) = T(arg)
+convert{T<:VecElement}(::Type{T}, arg::T) = arg
# init core docsystem
import Core: @doc, @__doc__, @doc_str
@@ -42,8 +46,8 @@ if false
end
## Load essential files and libraries
-include("ctypes.jl")
include("essentials.jl")
+include("ctypes.jl")
include("base.jl")
include("generator.jl")
include("reflection.jl")
@@ -63,19 +67,9 @@ include("int.jl")
include("operators.jl")
include("pointer.jl")
include("refpointer.jl")
-(::Type{T}){T}(arg) = convert(T, arg)::T
-(::Type{VecElement{T}}){T}(arg) = VecElement{T}(convert(T, arg))
-convert{T<:VecElement}(::Type{T}, arg) = T(arg)
-convert{T<:VecElement}(::Type{T}, arg::T) = arg
include("checked.jl")
importall .Checked
-# Symbol constructors
-if !isdefined(Core, :Inference)
- Symbol(s::String) = Symbol(s.data)
- Symbol(a::Array{UInt8,1}) =
- ccall(:jl_symbol_n, Ref{Symbol}, (Ptr{UInt8}, Int32), a, length(a))
-end
# vararg Symbol constructor
Symbol(x...) = Symbol(string(x...))
diff --git a/doc/src/devdocs/ast.md b/doc/src/devdocs/ast.md
index 68d40c9e226ae..ea6662d448bdc 100644
--- a/doc/src/devdocs/ast.md
+++ b/doc/src/devdocs/ast.md
@@ -64,6 +64,7 @@ The following data types exist in lowered form:
Marks a point where a variable is created. This has the effect of resetting a variable to undefined.
+
### Expr types
These symbols appear in the `head` field of `Expr`s in lowered form.
@@ -192,9 +193,10 @@ These symbols appear in the `head` field of `Expr`s in lowered form.
* `:pop_loc`: returns to the source location before the matching `:push_loc`.
+
### Method
-A unique'd container describing the shared metadata for a single (unspecialized) method.
+A unique'd container describing the shared metadata for a single method.
* `name`, `module`, `file`, `line`, `sig`
@@ -223,6 +225,11 @@ A unique'd container describing the shared metadata for a single (unspecialized)
Descriptive bit-fields for the source code of this Method.
+ * `min_world` / `max_world`
+
+ The range of world ages for which this method is visible to dispatch.
+
+
### MethodInstance
A unique'd container describing a single callable signature for a Method. See especially [Proper maintenance and care of multi-threading locks](@ref)
@@ -264,9 +271,16 @@ for important details on how to modify these fields safely.
The ABI to use when calling `fptr`. Some significant ones include:
- * 0 - not compiled yet
+ * 0 - Not compiled yet.
* 1 - JL_CALLABLE `jl_value_t *(*)(jl_function_t *f, jl_value_t *args[nargs], uint32_t nargs)`
- * 2 - constant (stored in `inferred`)
+ * 2 - Constant (value stored in `inferred`)
+ * 3 - With static parameters forwarded `jl_value_t *(*)(jl_svec_t *sparams, jl_function_t *f, jl_value_t *args[nargs], uint32_t nargs)`
+ * 4 - Run in interpreter `jl_value_t *(*)(jl_method_instance_t *meth, jl_function_t *f, jl_value_t *args[nargs], uint32_t nargs)`
+
+ * `min_world` / `max_world`
+
+ The range of world ages for which this method instance is valid to be called.
+
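+A minimal sketch of how these ranges relate to the global counter, assuming the
+internal helpers `Base.min_world` / `Base.max_world` (used by the `MethodError`
+printing code in this change) and the exported C entry point `jl_get_world_counter`:
+
+```julia
+f(x) = x + 1
+m = first(methods(f))                          # a Method
+lo, hi = Base.min_world(m), Base.max_world(m)  # visibility range of `m`
+cur = ccall(:jl_get_world_counter, UInt, ())   # current global world age
+@assert lo <= cur <= hi                        # `m` is visible to dispatch right now
+```
+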
### CodeInfo
@@ -320,6 +334,7 @@ Boolean properties:
Whether this is known to be a pure function of its arguments, without respect to the
state of the method caches or other mutable global state.
+
## Surface syntax AST
Front end ASTs consist entirely of `Expr`s and atoms (e.g. symbols, numbers). There is generally
diff --git a/doc/src/manual/metaprogramming.md b/doc/src/manual/metaprogramming.md
index 27f1511819d1e..c45d11289b7ad 100644
--- a/doc/src/manual/metaprogramming.md
+++ b/doc/src/manual/metaprogramming.md
@@ -921,6 +921,53 @@ true for macros too - and just like for macros, the use of [`eval()`](@ref) in a
is a sign that you're doing something the wrong way.) However, unlike macros, the runtime system
cannot correctly handle a call to [`eval()`](@ref), so it is disallowed.
+It is also important to see how `@generated` functions interact with method redefinition.
+Following the principle that a correct `@generated` function must not observe any
+mutable state or cause any mutation of global state, we see the following behavior.
+Observe that the generated function *cannot* call any method that was not defined
+prior to the *definition* of the generated function itself.
+
+```julia
+julia> # initially f(x) has one definition:
+
+julia> f(x) = "original definition";
+
+julia> # start some other operations that use f(x):
+
+julia> g(x) = f(x);
+
+julia> @generated gen1(x) = f(x);
+
+julia> @generated gen2(x) = :(f(x));
+
+julia> # now we add some new definitions for f(x):
+
+julia> f(x::Int) = "definition for Int";
+
+julia> f(x::Type{Int}) = "definition for Type{Int}";
+
+julia> # and compare how these results differ:
+
+julia> f(1)
+"definition for Int"
+
+julia> g(1)
+"definition for Int"
+
+julia> gen1(1)
+"original definition"
+
+julia> gen2(1)
+"definition for Int"
+
+julia> # each method of a generated function has its own view of defined functions:
+
+julia> @generated gen1(x::Real) = f(x);
+
+julia> gen1(1)
+"definition for Type{Int}"
+```
+
The example generated function `foo` above did not do anything a normal function `foo(x) = x * x`
could not do (except printing the type on the first invocation, and incurring higher overhead).
However, the power of a generated function lies in its ability to compute different quoted expressions
diff --git a/doc/src/manual/methods.md b/doc/src/manual/methods.md
index cb43659dac4c2..24a7ef9ac85db 100644
--- a/doc/src/manual/methods.md
+++ b/doc/src/manual/methods.md
@@ -389,6 +389,114 @@ false
The `same_type_numeric` function behaves much like the `same_type` function defined above, but
is only defined for pairs of numbers.
+## Redefining Methods
+
+When redefining a method or adding new methods,
+it is important to realize that these changes don't take effect immediately.
+This is key to Julia's ability to statically infer and compile code to run fast,
+without the usual JIT tricks and overhead.
+Indeed, any new method definition won't be visible to the current runtime environment,
+including Tasks and Threads (and any previously defined `@generated` functions).
+Let's start with an example to see what this means:
+
+```julia
+julia> function tryeval()
+ @eval newfun() = 1
+ newfun()
+ end
+tryeval (generic function with 1 method)
+
+julia> tryeval()
+ERROR: MethodError: no method matching newfun()
+The applicable method may be too new: running in world age xxxx1, while current world is xxxx2.
+Closest candidates are:
+ newfun() at none:1 (method too new to be called from this world context.)
+ in tryeval() at none:1
+ ...
+
+julia> newfun()
+1
+```
+
+In this example, observe that the new definition for `newfun` has been created,
+but can't be immediately called.
+The new global is immediately visible to the `tryeval` function,
+so you could write `return newfun` (without parentheses).
+But neither you, nor any of your callers, nor the functions they call (and so on)
+can call this new method definition!
+
+But there's an exception: future calls to `newfun` *from the REPL* work as expected,
+being able to both see and call the new definition of `newfun`.
+
+However, future calls to `tryeval` will continue to see the definition of `newfun` as it was
+*at the previous statement at the REPL*, and thus before that call to `tryeval`.
+
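+To make the distinction concrete, here is a small variant of the example
+(a sketch, using a hypothetical `newfun3`) that returns the new function object
+instead of calling it:
+
+```julia
+julia> function tryeval_ref()
+           @eval newfun3() = 1
+           return newfun3      # the new global binding is visible immediately
+       end;
+
+julia> fref = tryeval_ref();   # returns the function object without calling it
+
+julia> fref()                  # a later REPL statement runs in a newer world, so this works
+1
+```
+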
+You may want to try this for yourself to see how it works.
+
+The implementation of this behavior is a "world age counter".
+This monotonically increasing value tracks each method definition operation.
+This allows describing "the set of method definitions visible to a given runtime environment"
+as a single number, or "world age".
+It also allows comparing the methods available in two worlds just by comparing their ordinal value.
+In the example above, we see that the "current world" (in which the method `newfun()` exists),
+is one greater than the task-local "runtime world" that was fixed when the execution of `tryeval` started.
+
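+The counters themselves can be queried directly. The following is only a minimal
+sketch, relying on the internal C entry points `jl_get_world_counter` and
+`jl_get_tls_world_age` rather than on any documented API:
+
+```julia
+current_world() = ccall(:jl_get_world_counter, UInt, ())  # the global counter
+frozen_world()  = ccall(:jl_get_tls_world_age, UInt, ())  # the world fixed for the running code
+
+function observe()
+    @eval dummyfun() = 0   # defining a method advances the global counter
+    return (frozen_world(), current_world())
+end
+# observe() returns a pair whose first element (the task-local age) is strictly
+# smaller than the second (the global counter), because the new method was defined
+# after this task's world was fixed.
+```
+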
+Sometimes it is necessary to get around this (for example, if you are implementing the above REPL).
+Fortunately, there is an easy solution: call `eval` a second time.
+For example, here we create a zero-argument closure over `ans` and `eval` a call to it:
+
+```julia
+julia> function tryeval2()
+           ans = (@eval newfun2() = 1)
+           res = eval(Expr(:call,
+               function()
+                   return ans() + 1
+               end))
+           return res
+       end
+tryeval2 (generic function with 1 method)
+
+julia> tryeval2()
+2
+```
+
+Finally, let's take a look at some more complex examples where this rule comes into play.
+
+```julia
+julia> # initially f(x) has one definition:
+
+julia> f(x) = "original definition";
+
+julia> # start some other operations that use f(x):
+
+julia> g(x) = f(x);
+
+julia> t = @async f(wait()); yield();
+
+julia> # now we add some new definitions for f(x):
+
+julia> f(x::Int) = "definition for Int";
+
+julia> f(x::Type{Int}) = "definition for Type{Int}";
+
+julia> # and compare how these results differ:
+
+julia> f(1)
+"definition for Int"
+
+julia> g(1)
+"definition for Int"
+
+julia> wait(schedule(t, 1))
+"original definition"
+
+julia> t = @async f(wait()); yield();
+
+julia> wait(schedule(t, 1))
+"definition for Int"
+```
+
## Parametrically-constrained Varargs methods
Function parameters can also be used to constrain the number of arguments that may be supplied
diff --git a/src/alloc.c b/src/alloc.c
index a768ea66243d4..c3b5c376accf1 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -409,6 +409,7 @@ JL_DLLEXPORT jl_method_instance_t *jl_new_method_instance_uninit(void)
li->inferred_const = NULL;
li->rettype = (jl_value_t*)jl_any_type;
li->sparam_vals = jl_emptysvec;
+ li->backedges = NULL;
li->fptr = NULL;
li->unspecialized_ducttape = NULL;
li->jlcall_api = 0;
@@ -418,6 +419,8 @@ JL_DLLEXPORT jl_method_instance_t *jl_new_method_instance_uninit(void)
li->specTypes = NULL;
li->inInference = 0;
li->def = NULL;
+ li->min_world = 0;
+ li->max_world = 0;
return li;
}
@@ -457,8 +460,9 @@ STATIC_INLINE jl_value_t *jl_call_staged(jl_svec_t *sparam_vals, jl_method_insta
fptr.fptr = generator->fptr;
fptr.jlcall_api = generator->jlcall_api;
if (__unlikely(fptr.fptr == NULL || fptr.jlcall_api == 0)) {
- void *F = jl_compile_linfo(generator, (jl_code_info_t*)generator->inferred, &jl_default_cgparams).functionObject;
- fptr = jl_generate_fptr(generator, F);
+ size_t world = generator->def->min_world;
+ void *F = jl_compile_linfo(&generator, (jl_code_info_t*)generator->inferred, world, &jl_default_cgparams).functionObject;
+ fptr = jl_generate_fptr(generator, F, world);
}
assert(jl_svec_len(generator->def->sparam_syms) == jl_svec_len(sparam_vals));
if (fptr.jlcall_api == 1)
@@ -490,11 +494,14 @@ JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *linfo)
int last_in = ptls->in_pure_callback;
jl_module_t *last_m = ptls->current_module;
jl_module_t *task_last_m = ptls->current_task->current_module;
+ size_t last_age = jl_get_ptls_states()->world_age;
assert(jl_svec_len(linfo->def->sparam_syms) == jl_svec_len(sparam_vals));
JL_TRY {
ptls->in_pure_callback = 1;
// need to eval macros in the right module
ptls->current_task->current_module = ptls->current_module = linfo->def->module;
+ // and the right world
+ ptls->world_age = generator->def->min_world;
ex = jl_exprn(lambda_sym, 2);
@@ -544,6 +551,7 @@ JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *linfo)
jl_lineno = last_lineno;
ptls->current_module = last_m;
ptls->current_task->current_module = task_last_m;
+ ptls->world_age = last_age;
}
JL_CATCH {
ptls->in_pure_callback = last_in;
@@ -574,10 +582,12 @@ jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_tupletype_t *types,
new_linfo->def = m;
new_linfo->specTypes = types;
new_linfo->sparam_vals = sp;
+ new_linfo->min_world = m->min_world;
+ new_linfo->max_world = m->max_world;
return new_linfo;
}
-JL_DLLEXPORT void jl_method_set_source(jl_method_t *m, jl_code_info_t *src)
+static void jl_method_set_source(jl_method_t *m, jl_code_info_t *src)
{
uint8_t j;
uint8_t called = 0;
@@ -642,10 +652,13 @@ JL_DLLEXPORT jl_method_t *jl_new_method_uninit(void)
m->nargs = 0;
m->needs_sparam_vals_ducttape = 2;
m->traced = 0;
+ m->min_world = 1;
+ m->max_world = ~(size_t)0;
JL_MUTEX_INIT(&m->writelock);
return m;
}
+jl_array_t *jl_all_methods;
jl_method_t *jl_new_method(jl_code_info_t *definition,
jl_sym_t *name,
jl_tupletype_t *sig,
@@ -663,6 +676,7 @@ jl_method_t *jl_new_method(jl_code_info_t *definition,
JL_GC_PUSH1(&root);
jl_method_t *m = jl_new_method_uninit();
+ m->min_world = ++jl_world_counter;
m->isstaged = isstaged;
m->name = name;
m->sig = sig;
@@ -682,6 +696,17 @@ jl_method_t *jl_new_method(jl_code_info_t *definition,
m->unspecialized->inferred = (jl_value_t*)m->source;
m->source = NULL;
}
+
+#ifdef RECORD_METHOD_ORDER
+ if (jl_all_methods == NULL)
+ jl_all_methods = jl_alloc_vec_any(0);
+#endif
+ if (jl_all_methods != NULL) {
+ while (jl_array_len(jl_all_methods) < jl_world_counter)
+ jl_array_ptr_1d_push(jl_all_methods, NULL);
+ jl_array_ptr_1d_push(jl_all_methods, (jl_value_t*)m);
+ }
+
JL_GC_POP();
return m;
}
@@ -772,7 +797,7 @@ JL_DLLEXPORT jl_sym_t *jl_symbol_lookup(const char *str)
return symtab_lookup(&symtab, str, strlen(str), NULL);
}
-JL_DLLEXPORT jl_sym_t *jl_symbol_n(const char *str, int32_t len)
+JL_DLLEXPORT jl_sym_t *jl_symbol_n(const char *str, size_t len)
{
if (memchr(str, 0, len))
jl_exceptionf(jl_argumenterror_type, "Symbol name may not contain \\0");
@@ -844,6 +869,7 @@ JL_DLLEXPORT jl_methtable_t *jl_new_method_table(jl_sym_t *name, jl_module_t *mo
mt->cache.unknown = jl_nothing;
mt->max_args = 0;
mt->kwsorter = NULL;
+ mt->backedges = NULL;
JL_MUTEX_INIT(&mt->writelock);
return mt;
}
@@ -862,9 +888,6 @@ JL_DLLEXPORT jl_typename_t *jl_new_typename_in(jl_sym_t *name, jl_module_t *modu
tn->names = NULL;
tn->hash = bitmix(bitmix(module ? module->uuid : 0, name->hash), 0xa1ada1da);
tn->mt = NULL;
- JL_GC_PUSH1(&tn);
- tn->mt = NULL;
- JL_GC_POP();
return tn;
}
diff --git a/src/ast.c b/src/ast.c
index d25a82759d6b9..531a8879bb52b 100644
--- a/src/ast.c
+++ b/src/ast.c
@@ -153,14 +153,15 @@ value_t fl_invoke_julia_macro(fl_context_t *fl_ctx, value_t *args, uint32_t narg
int i;
for(i=1; i < nargs; i++) margs[i] = scm_to_julia(fl_ctx, args[i], 1);
jl_value_t *result = NULL;
+ size_t world = jl_get_ptls_states()->world_age;
JL_TRY {
margs[0] = scm_to_julia(fl_ctx, args[0], 1);
margs[0] = jl_toplevel_eval(margs[0]);
- mfunc = jl_method_lookup(jl_gf_mtable(margs[0]), margs, nargs, 1);
+ mfunc = jl_method_lookup(jl_gf_mtable(margs[0]), margs, nargs, 1, world);
if (mfunc == NULL) {
JL_GC_POP();
- jl_method_error((jl_function_t*)margs[0], margs, nargs);
+ jl_method_error((jl_function_t*)margs[0], margs, nargs, world);
// unreachable
}
margs[nargs] = result = jl_call_method_internal(mfunc, margs, nargs);
@@ -720,6 +721,7 @@ jl_value_t *jl_parse_eval_all(const char *fname,
int last_lineno = jl_lineno;
const char *last_filename = jl_filename;
+ size_t last_age = jl_get_ptls_states()->world_age;
jl_lineno = 0;
jl_filename = fname;
jl_array_t *roots = NULL;
@@ -737,10 +739,12 @@ jl_value_t *jl_parse_eval_all(const char *fname,
JL_TIMING(LOWERING);
expansion = fl_applyn(fl_ctx, 1, symbol_value(symbol(fl_ctx, "jl-expand-to-thunk")), car_(ast));
}
+ jl_get_ptls_states()->world_age = jl_world_counter;
form = scm_to_julia(fl_ctx, expansion, 0);
jl_sym_t *head = NULL;
if (jl_is_expr(form)) head = ((jl_expr_t*)form)->head;
JL_SIGATOMIC_END();
+ jl_get_ptls_states()->world_age = jl_world_counter;
if (head == jl_incomplete_sym)
jl_errorf("syntax: %s", jl_string_data(jl_exprarg(form,0)));
else if (head == error_sym)
@@ -760,6 +764,7 @@ jl_value_t *jl_parse_eval_all(const char *fname,
result = jl_box_long(jl_lineno);
err = 1;
}
+ jl_get_ptls_states()->world_age = last_age;
jl_lineno = last_lineno;
jl_filename = last_filename;
fl_free_gc_handles(fl_ctx, 1);
diff --git a/src/builtin_proto.h b/src/builtin_proto.h
index 695916e7fdc15..0cd436157fc7d 100644
--- a/src/builtin_proto.h
+++ b/src/builtin_proto.h
@@ -22,7 +22,7 @@ extern "C" {
DECLARE_BUILTIN(throw); DECLARE_BUILTIN(is);
DECLARE_BUILTIN(typeof); DECLARE_BUILTIN(sizeof);
DECLARE_BUILTIN(issubtype); DECLARE_BUILTIN(isa);
-DECLARE_BUILTIN(typeassert); DECLARE_BUILTIN(_apply);
+DECLARE_BUILTIN(_apply); DECLARE_BUILTIN(_apply_pure);
DECLARE_BUILTIN(isdefined); DECLARE_BUILTIN(nfields);
DECLARE_BUILTIN(tuple); DECLARE_BUILTIN(svec);
DECLARE_BUILTIN(getfield); DECLARE_BUILTIN(setfield);
@@ -30,6 +30,7 @@ DECLARE_BUILTIN(fieldtype); DECLARE_BUILTIN(arrayref);
DECLARE_BUILTIN(arrayset); DECLARE_BUILTIN(arraysize);
DECLARE_BUILTIN(apply_type); DECLARE_BUILTIN(applicable);
DECLARE_BUILTIN(invoke); DECLARE_BUILTIN(_expr);
+DECLARE_BUILTIN(typeassert);
#ifdef __cplusplus
}
diff --git a/src/builtins.c b/src/builtins.c
index faa6d87df2156..721e6270d21ca 100644
--- a/src/builtins.c
+++ b/src/builtins.c
@@ -219,6 +219,7 @@ JL_DLLEXPORT void jl_enter_handler(jl_handler_t *eh)
#endif
eh->defer_signal = ptls->defer_signal;
eh->finalizers_inhibited = ptls->finalizers_inhibited;
+ eh->world_age = ptls->world_age;
current_task->eh = eh;
#ifdef ENABLE_TIMINGS
eh->timing_stack = current_task->timing_stack;
@@ -549,6 +550,32 @@ JL_CALLABLE(jl_f__apply)
return result;
}
+// this is like `_apply`, but with quasi-exact checks to make sure it is pure
+JL_CALLABLE(jl_f__apply_pure)
+{
+ jl_ptls_t ptls = jl_get_ptls_states();
+ int last_in = ptls->in_pure_callback;
+ jl_value_t *ret = NULL;
+ JL_TRY {
+ ptls->in_pure_callback = 1;
+ // because this function was declared pure,
+ // it may be run in any world;
+ // run it in the newest world so that, e.g., `promote`
+ // sees the most recent method definitions
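+ // (from Julia this builtin is reachable as `Core._apply_pure` and follows the
+ // same splatting convention as `Core._apply`, e.g. `Core._apply_pure(+, (1, 2, 3)) == 6`)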
+ size_t last_age = ptls->world_age;
+ ptls->world_age = jl_world_counter;
+ ret = jl_f__apply(NULL, args, nargs);
+ ptls->world_age = last_age;
+ ptls->in_pure_callback = last_in;
+ }
+ JL_CATCH {
+ ptls->in_pure_callback = last_in;
+ jl_rethrow();
+ }
+ return ret;
+}
+
// eval -----------------------------------------------------------------------
JL_DLLEXPORT jl_value_t *jl_toplevel_eval_in(jl_module_t *m, jl_value_t *ex)
@@ -562,6 +589,7 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_in(jl_module_t *m, jl_value_t *ex)
jl_error("eval cannot be used in a generated function");
jl_value_t *v = NULL;
int last_lineno = jl_lineno;
+ size_t last_age = ptls->world_age;
jl_module_t *last_m = ptls->current_module;
jl_module_t *task_last_m = ptls->current_task->current_module;
if (jl_options.incremental && jl_generating_output()) {
@@ -574,6 +602,7 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_in(jl_module_t *m, jl_value_t *ex)
}
JL_TRY {
ptls->current_task->current_module = ptls->current_module = m;
+ ptls->world_age = jl_world_counter;
v = jl_toplevel_eval(ex);
}
JL_CATCH {
@@ -583,6 +612,7 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_in(jl_module_t *m, jl_value_t *ex)
jl_rethrow();
}
jl_lineno = last_lineno;
+ ptls->world_age = last_age;
ptls->current_module = last_m;
ptls->current_task->current_module = task_last_m;
assert(v);
@@ -980,7 +1010,8 @@ static void jl_check_type_tuple(jl_value_t *t, jl_sym_t *name, const char *ctx)
JL_CALLABLE(jl_f_applicable)
{
JL_NARGSV(applicable, 1);
- return jl_method_lookup(jl_gf_mtable(args[0]), args, nargs, 1) != NULL ?
+ size_t world = jl_get_ptls_states()->world_age;
+ return jl_method_lookup(jl_gf_mtable(args[0]), args, nargs, 1, world) != NULL ?
jl_true : jl_false;
}
@@ -1142,6 +1173,7 @@ void jl_init_primitives(void)
// internal functions
add_builtin_func("apply_type", jl_f_apply_type);
add_builtin_func("_apply", jl_f__apply);
+ add_builtin_func("_apply_pure", jl_f__apply_pure);
add_builtin_func("_expr", jl_f__expr);
add_builtin_func("svec", jl_f_svec);
diff --git a/src/ccall.cpp b/src/ccall.cpp
index b3e85fe2cd26d..530e1897d32d8 100644
--- a/src/ccall.cpp
+++ b/src/ccall.cpp
@@ -689,7 +689,10 @@ static jl_value_t* try_eval(jl_value_t *ex, jl_codectx_t *ctx, const char *failu
if (constant || jl_is_ssavalue(ex))
return constant;
JL_TRY {
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = ctx->world;
constant = jl_interpret_toplevel_expr_in(ctx->module, ex, ctx->source, ctx->linfo->sparam_vals);
+ jl_get_ptls_states()->world_age = last_age;
}
JL_CATCH {
if (compiletime)
diff --git a/src/codegen.cpp b/src/codegen.cpp
index c972b68aba6e9..48a36c7b77ee3 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -403,6 +403,7 @@ Function *juliapersonality_func;
#endif
static Function *diff_gc_total_bytes_func;
static Function *jlarray_data_owner_func;
+static Function *jlgetworld_func;
// placeholder functions
static Function *gcroot_func;
@@ -541,6 +542,7 @@ struct jl_codectx_t {
jl_method_instance_t *linfo;
jl_code_info_t *source;
jl_array_t *code;
+ size_t world;
const char *name;
StringRef file;
ssize_t *line;
@@ -556,6 +558,7 @@ struct jl_codectx_t {
CallInst *ptlsStates;
Value *signalPage;
+ Value *world_age_field;
bool debug_enabled;
bool is_inbounds{false};
@@ -821,7 +824,12 @@ void jl_dump_compiles(void *s)
// --- entry point ---
//static int n_emit=0;
-static std::unique_ptr<Module> emit_function(jl_method_instance_t *lam, jl_code_info_t *src, jl_llvm_functions_t *declarations, const jl_cgparams_t *params);
+static std::unique_ptr<Module> emit_function(
+ jl_method_instance_t *lam,
+ jl_code_info_t *src,
+ size_t world,
+ jl_llvm_functions_t *declarations,
+ const jl_cgparams_t *params);
void jl_add_linfo_in_flight(StringRef name, jl_method_instance_t *linfo, const DataLayout &DL);
// this generates llvm code for the lambda info
@@ -829,23 +837,35 @@ void jl_add_linfo_in_flight(StringRef name, jl_method_instance_t *linfo, const D
// (and the shadow module), but doesn't yet compile
// or generate object code for it
extern "C"
-jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t *li, jl_code_info_t *src, const jl_cgparams_t *params)
+jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t **pli, jl_code_info_t *src, size_t world, const jl_cgparams_t *params)
{
JL_TIMING(CODEGEN);
+ jl_method_instance_t *li = *pli;
assert(jl_is_method_instance(li));
jl_llvm_functions_t decls = {};
// Step 1. See if it is already compiled,
// Get the codegen lock,
// And get the source
- if (!src) {
+ if (li->def == NULL) {
+ JL_LOCK(&codegen_lock);
+ src = (jl_code_info_t*)li->inferred;
+ decls = li->functionObjectsDecls;
+ if (decls.functionObject != NULL || !src || !jl_is_code_info(src) || li->jlcall_api == 2) {
+ JL_UNLOCK(&codegen_lock);
+ return decls;
+ }
+ }
+ else if (!src) {
// Step 1a. If the caller didn't provide the source,
// try to infer it for ourself
// first see if it is already compiled
decls = li->functionObjectsDecls;
if ((params->cached && decls.functionObject != NULL) || li->jlcall_api == 2) {
- return decls;
+ if (li->min_world <= world && li->max_world >= world)
+ return decls;
}
JL_LOCK(&codegen_lock);
+ assert(li->min_world <= world && li->max_world >= world);
decls = li->functionObjectsDecls;
if ((params->cached && decls.functionObject != NULL) || li->jlcall_api == 2) {
JL_UNLOCK(&codegen_lock);
@@ -856,7 +876,8 @@ jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t *li, jl_code_info_t *s
src = (jl_code_info_t*)li->inferred;
if (src) {
if (!jl_is_code_info(src)) {
- src = jl_type_infer(li, 0);
+ src = jl_type_infer(pli, world, 0);
+ li = *pli;
}
if (!src || li->jlcall_api == 2) {
JL_UNLOCK(&codegen_lock);
@@ -864,7 +885,7 @@ jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t *li, jl_code_info_t *s
}
}
else {
- // failed to compile
+ // declare a failure to compile
JL_UNLOCK(&codegen_lock);
return decls;
}
@@ -896,10 +917,19 @@ jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t *li, jl_code_info_t *s
// Step 3. actually do the work of emitting the function
std::unique_ptr<Module> m;
- Function *f = (Function*)decls.functionObject, *specf = (Function*)decls.specFunctionObject;
JL_TRY {
- m = emit_function(li, src, &li->functionObjectsDecls, params);
- decls = li->functionObjectsDecls;
+ jl_llvm_functions_t *pdecls;
+ if (!params->cached)
+ pdecls = &decls;
+ else if (li->min_world <= world && li->max_world >= world)
+ pdecls = &li->functionObjectsDecls;
+ else if (li->def == NULL)
+ pdecls = &li->functionObjectsDecls;
+ else
+ pdecls = &decls;
+ m = emit_function(li, src, world, pdecls, params);
+ if (world)
+ decls = li->functionObjectsDecls;
//n_emit++;
}
JL_CATCH {
@@ -913,12 +943,8 @@ jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t *li, jl_code_info_t *s
JL_UNLOCK(&codegen_lock); // Might GC
jl_rethrow_with_add("error compiling %s", jl_symbol_name(li->def ? li->def->name : anonymous_sym));
}
- if (!params->cached) {
- li->functionObjectsDecls.functionObject = f;
- li->functionObjectsDecls.specFunctionObject = specf;
- }
- f = (Function*)decls.functionObject;
- specf = (Function*)decls.specFunctionObject;
+ Function *f = (Function*)decls.functionObject;
+ Function *specf = (Function*)decls.specFunctionObject;
// Step 4. Prepare debug info to receive this function
// record that this function name came from this linfo,
@@ -940,7 +966,7 @@ jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t *li, jl_code_info_t *s
// Step 5. Add the result to the execution engine now
jl_finalize_module(m.release(), !toplevel);
- if (li->jlcall_api != 2) {
+ if (world && li->jlcall_api != 2) {
// if not inlineable, code won't be needed again
if (JL_DELETE_NON_INLINEABLE && jl_options.debug_level <= 1 &&
li->def && li->inferred && jl_is_code_info(li->inferred) &&
@@ -1084,7 +1110,6 @@ static jl_method_instance_t *jl_get_unspecialized(jl_method_instance_t *method)
if (def->unspecialized == NULL) {
JL_LOCK(&def->writelock);
if (def->unspecialized == NULL) {
- // XXX: use computed env rather than empty svec
def->unspecialized = jl_get_specialized(def, def->sig, jl_emptysvec);
jl_gc_wb(def, def->unspecialized);
}
@@ -1095,7 +1120,7 @@ static jl_method_instance_t *jl_get_unspecialized(jl_method_instance_t *method)
// this compiles li and emits fptr
extern "C"
-jl_generic_fptr_t jl_generate_fptr(jl_method_instance_t *li, void *_F)
+jl_generic_fptr_t jl_generate_fptr(jl_method_instance_t *li, void *_F, size_t world)
{
Function *F = (Function*)_F;
jl_generic_fptr_t fptr;
@@ -1117,33 +1142,36 @@ jl_generic_fptr_t jl_generate_fptr(jl_method_instance_t *li, void *_F)
return fptr;
}
jl_method_instance_t *unspec = NULL;
- if (li->def && !li->def->isstaged && li->def->unspecialized) {
- unspec = li->def->unspecialized;
- }
- if (!F || !jl_can_finalize_function(F)) {
- // can't compile F in the JIT right now,
- // so instead compile an unspecialized version
- // and return its fptr instead
- if (!unspec)
- unspec = jl_get_unspecialized(li); // get-or-create the unspecialized version to cache the result
- jl_code_info_t *src = unspec->def->isstaged ? jl_code_for_staged(unspec) : unspec->def->source;
- fptr.fptr = unspec->fptr;
- fptr.jlcall_api = unspec->jlcall_api;
- if (fptr.fptr && fptr.jlcall_api) {
- JL_UNLOCK(&codegen_lock);
- return fptr;
- }
- jl_llvm_functions_t decls = unspec->functionObjectsDecls;
- if (unspec == li) {
- // temporarily clear the decls so that it will compile our unspec version of src
- unspec->functionObjectsDecls.functionObject = NULL;
- unspec->functionObjectsDecls.specFunctionObject = NULL;
- }
- F = (Function*)jl_compile_linfo(unspec, src, &jl_default_cgparams).functionObject;
- if (unspec == li) {
- unspec->functionObjectsDecls = decls;
+ if (li->def) {
+ if (!li->def->isstaged && li->def->unspecialized) {
+ unspec = li->def->unspecialized;
+ }
+ if (!F || !jl_can_finalize_function(F)) {
+ // can't compile F in the JIT right now,
+ // so instead compile an unspecialized version
+ // and return its fptr instead
+ if (!unspec)
+ unspec = jl_get_unspecialized(li); // get-or-create the unspecialized version to cache the result
+ jl_code_info_t *src = unspec->def->isstaged ? jl_code_for_staged(unspec) : unspec->def->source;
+ fptr.fptr = unspec->fptr;
+ fptr.jlcall_api = unspec->jlcall_api;
+ if (fptr.fptr && fptr.jlcall_api) {
+ JL_UNLOCK(&codegen_lock);
+ return fptr;
+ }
+ jl_llvm_functions_t decls = unspec->functionObjectsDecls;
+ if (unspec == li) {
+ // temporarily clear the decls so that it will compile our unspec version of src
+ unspec->functionObjectsDecls.functionObject = NULL;
+ unspec->functionObjectsDecls.specFunctionObject = NULL;
+ }
+ assert(src);
+ F = (Function*)jl_compile_linfo(&unspec, src, unspec->min_world, &jl_default_cgparams).functionObject; // this does not change unspec
+ if (unspec == li) {
+ unspec->functionObjectsDecls = decls;
+ }
+ assert(jl_can_finalize_function(F));
}
- assert(jl_can_finalize_function(F));
}
assert(F);
fptr.fptr = (jl_fptr_t)getAddressForFunction(F);
@@ -1251,7 +1279,7 @@ void jl_extern_c(jl_function_t *f, jl_value_t *rt, jl_value_t *argt, char *name)
// this is paired with jl_dump_function_ir and jl_dump_function_asm in particular ways:
// misuse will leak memory or cause read-after-free
extern "C" JL_DLLEXPORT
-void *jl_get_llvmf_defn(jl_method_instance_t *linfo, bool getwrapper, bool optimize, const jl_cgparams_t params)
+void *jl_get_llvmf_defn(jl_method_instance_t *linfo, size_t world, bool getwrapper, bool optimize, const jl_cgparams_t params)
{
// `source` is `NULL` for generated functions.
// The `isstaged` check can be removed if that is not the case anymore.
@@ -1263,7 +1291,7 @@ void *jl_get_llvmf_defn(jl_method_instance_t *linfo, bool getwrapper, bool optim
jl_code_info_t *src = (jl_code_info_t*)linfo->inferred;
JL_GC_PUSH1(&src);
if (!src || !jl_is_code_info(src)) {
- src = jl_type_infer(linfo, 0);
+ src = jl_type_infer(&linfo, world, 0);
if (!src)
src = linfo->def->isstaged ? jl_code_for_staged(linfo) : linfo->def->source;
}
@@ -1279,7 +1307,7 @@ void *jl_get_llvmf_defn(jl_method_instance_t *linfo, bool getwrapper, bool optim
jl_llvm_functions_t declarations;
std::unique_ptr<Module> m;
JL_TRY {
- m = emit_function(linfo, src, &declarations, &params);
+ m = emit_function(linfo, src, world, &declarations, &params);
}
JL_CATCH {
// something failed!
@@ -1330,7 +1358,7 @@ void *jl_get_llvmf_defn(jl_method_instance_t *linfo, bool getwrapper, bool optim
extern "C" JL_DLLEXPORT
-void *jl_get_llvmf_decl(jl_method_instance_t *linfo, bool getwrapper, const jl_cgparams_t params)
+void *jl_get_llvmf_decl(jl_method_instance_t *linfo, size_t world, bool getwrapper, const jl_cgparams_t params)
{
// `source` is `NULL` for generated functions.
// The `isstaged` check can be removed if that is not the case anymore.
@@ -1340,7 +1368,7 @@ void *jl_get_llvmf_decl(jl_method_instance_t *linfo, bool getwrapper, const jl_c
}
// compile this normally
- jl_llvm_functions_t decls = jl_compile_for_dispatch(linfo);
+ jl_llvm_functions_t decls = jl_compile_for_dispatch(&linfo, world);
if (decls.functionObject == NULL && linfo->jlcall_api == 2 && linfo->def) {
// normally we don't generate native code for these functions, so need an exception here
@@ -1349,11 +1377,11 @@ void *jl_get_llvmf_decl(jl_method_instance_t *linfo, bool getwrapper, const jl_c
decls = linfo->functionObjectsDecls;
if (decls.functionObject == NULL) {
jl_code_info_t *src = NULL;
- src = jl_type_infer(linfo, 0);
+ src = jl_type_infer(&linfo, world, 0);
if (!src) {
src = linfo->def->isstaged ? jl_code_for_staged(linfo) : linfo->def->source;
}
- decls = jl_compile_linfo(linfo, src, &params);
+ decls = jl_compile_linfo(&linfo, src, world, &params);
linfo->functionObjectsDecls = decls;
}
JL_UNLOCK(&codegen_lock);
@@ -1366,37 +1394,6 @@ void *jl_get_llvmf_decl(jl_method_instance_t *linfo, bool getwrapper, const jl_c
}
-extern "C" JL_DLLEXPORT
-void *jl_get_llvmf(jl_tupletype_t *tt, bool getwrapper, bool getdeclarations)
-{ // DEPRECATED
- jl_method_instance_t *linfo = NULL, *temp = NULL;
- JL_GC_PUSH3(&linfo, &temp, &tt);
- if (tt != NULL) {
- linfo = jl_get_specialization1(tt);
- if (linfo == NULL) {
- linfo = jl_method_lookup_by_type(
- ((jl_datatype_t*)jl_tparam0(tt))->name->mt, tt, 0, 0, 1);
- if (linfo == NULL || jl_has_call_ambiguities(tt, linfo->def)) {
- JL_GC_POP();
- return NULL;
- }
- }
- }
- if (linfo == NULL) {
- // no function found for argument tuple type
- JL_GC_POP();
- return NULL;
- }
- void *f;
- if (getdeclarations)
- f = jl_get_llvmf_decl(linfo, getwrapper, jl_default_cgparams);
- else
- f = jl_get_llvmf_defn(linfo, getwrapper, true, jl_default_cgparams);
- JL_GC_POP();
- return f;
-}
-
-
// print an llvm IR acquired from jl_get_llvmf
// warning: this takes ownership of, and destroys, f->getParent()
extern "C" JL_DLLEXPORT
@@ -2968,7 +2965,7 @@ static jl_cgval_t emit_invoke(jl_expr_t *ex, jl_codectx_t *ctx)
if (lival.constant) {
jl_method_instance_t *li = (jl_method_instance_t*)lival.constant;
assert(jl_is_method_instance(li));
- jl_llvm_functions_t decls = jl_compile_linfo(li, NULL, ctx->params);
+ jl_llvm_functions_t decls = jl_compile_linfo(&li, NULL, ctx->world, ctx->params);
if (li->jlcall_api == 2) {
assert(li->inferred);
return mark_julia_const(li->inferred);
@@ -3390,6 +3387,10 @@ static void emit_stmtpos(jl_value_t *expr, jl_codectx_t *ctx)
ConstantInt::get(T_int32, jl_unbox_long(args[0])));
}
else {
+ if (ctx->linfo->def == NULL) {
+ Value *world = builder.CreateCall(prepare_call(jlgetworld_func));
+ builder.CreateStore(world, ctx->world_age_field);
+ }
(void)emit_expr(expr, ctx);
}
}
@@ -3634,8 +3635,16 @@ static void allocate_gc_frame(BasicBlock *b0, jl_codectx_t *ctx)
PointerType::get(T_psize, 0));
}
+static void emit_last_age_field(jl_codectx_t *ctx)
+{
+ ctx->world_age_field = builder.CreateGEP(
+ builder.CreateBitCast(ctx->ptlsStates, T_psize),
+ ConstantInt::get(T_size, offsetof(jl_tls_states_t, world_age) / sizeof(size_t)));
+}
+
static Function *gen_cfun_wrapper(jl_function_t *ff, jl_value_t *jlrettype, jl_tupletype_t *argt,
- jl_typemap_entry_t *sf, jl_value_t *declrt, jl_tupletype_t *sigt)
+ jl_typemap_entry_t *sf, jl_value_t *declrt, jl_tupletype_t *sigt,
+ size_t world)
{
// Generate a c-callable wrapper
bool toboxed;
@@ -3662,15 +3671,15 @@ static Function *gen_cfun_wrapper(jl_function_t *ff, jl_value_t *jlrettype, jl_t
const char *name = "cfunction";
// try to look up this function for direct invoking
- jl_method_instance_t *lam = jl_get_specialization1((jl_tupletype_t*)sigt);
+ jl_method_instance_t *lam = jl_get_specialization1((jl_tupletype_t*)sigt, world);
jl_value_t *astrt = (jl_value_t*)jl_any_type;
// infer it first, if necessary
if (lam) {
name = jl_symbol_name(lam->def->name);
jl_code_info_t *src = NULL;
if (!lam->inferred) // TODO: this isn't ideal to be unconditionally calling type inference from here
- src = jl_type_infer(lam, 0);
- jl_compile_linfo(lam, src, &jl_default_cgparams);
+ src = jl_type_infer(&lam, world, 0);
+ jl_compile_linfo(&lam, src, world, &jl_default_cgparams);
if (lam->jlcall_api != 2) {
if (lam->functionObjectsDecls.functionObject == NULL ||
jl_jlcall_api(lam->functionObjectsDecls.functionObject) != 1) {
@@ -3713,10 +3722,20 @@ static Function *gen_cfun_wrapper(jl_function_t *ff, jl_value_t *jlrettype, jl_t
jl_codectx_t ctx = {};
ctx.f = cw;
ctx.linfo = lam;
+ ctx.code = NULL;
+ ctx.world = jl_world_counter;
ctx.sret = false;
ctx.spvals_ptr = NULL;
ctx.params = &jl_default_cgparams;
allocate_gc_frame(b0, &ctx);
+ emit_last_age_field(&ctx);
+ Value *dummy_world = builder.CreateAlloca(T_size);
+ Value *have_tls = builder.CreateICmpNE(ctx.ptlsStates, Constant::getNullValue(ctx.ptlsStates->getType()));
+ // TODO: in the future, try to initialize a full TLS context here
+ // for now, just use a dummy field to avoid a branch in this function
+ ctx.world_age_field = builder.CreateSelect(have_tls, ctx.world_age_field, dummy_world);
+ Value *last_age = tbaa_decorate(tbaa_gcframe, builder.CreateLoad(ctx.world_age_field));
+ builder.CreateStore(ConstantInt::get(T_size, world), ctx.world_age_field);
// Save the Function object reference
sf->func.value = jl_box_voidpointer((void*)cw_proto);
@@ -3944,6 +3963,7 @@ static Function *gen_cfun_wrapper(jl_function_t *ff, jl_value_t *jlrettype, jl_t
sret = true;
}
+ builder.CreateStore(last_age, ctx.world_age_field);
if (sret)
builder.CreateRetVoid();
else
@@ -4015,8 +4035,9 @@ static Function *jl_cfunction_object(jl_function_t *ff, jl_value_t *declrt, jl_t
cfunc_sig = (jl_value_t*)jl_apply_tuple_type((jl_svec_t*)cfunc_sig);
// check the cache
+ size_t world = jl_world_counter;
if (jl_cfunction_list.unknown != jl_nothing) {
- jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(jl_cfunction_list, (jl_tupletype_t*)cfunc_sig, NULL, 1, 0, /*offs*/0);
+ jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(jl_cfunction_list, (jl_tupletype_t*)cfunc_sig, NULL, 1, 0, /*offs*/0, world);
if (sf) {
Function *f = (Function*)jl_unbox_voidpointer(sf->func.value);
if (f) {
@@ -4026,7 +4047,7 @@ static Function *jl_cfunction_object(jl_function_t *ff, jl_value_t *declrt, jl_t
}
}
jl_typemap_entry_t *sf = jl_typemap_insert(&jl_cfunction_list, (jl_value_t*)jl_cfunction_list.unknown, (jl_tupletype_t*)cfunc_sig,
- jl_emptysvec, NULL, jl_emptysvec, NULL, /*offs*/0, &cfunction_cache, NULL);
+ jl_emptysvec, NULL, jl_emptysvec, NULL, /*offs*/0, &cfunction_cache, world, world, NULL);
// Backup the info for the nested compile
JL_LOCK(&codegen_lock);
@@ -4034,7 +4055,7 @@ static Function *jl_cfunction_object(jl_function_t *ff, jl_value_t *declrt, jl_t
DebugLoc olddl = builder.getCurrentDebugLocation();
bool last_n_c = nested_compile;
nested_compile = true;
- Function *f = gen_cfun_wrapper(ff, crt, (jl_tupletype_t*)argt, sf, declrt, (jl_tupletype_t*)sigt);
+ Function *f = gen_cfun_wrapper(ff, crt, (jl_tupletype_t*)argt, sf, declrt, (jl_tupletype_t*)sigt, world);
// Restore the previous compile context
builder.restoreIP(old);
builder.SetCurrentDebugLocation(olddl);
@@ -4074,6 +4095,8 @@ static Function *gen_jlcall_wrapper(jl_method_instance_t *lam, Function *f, bool
jl_codectx_t ctx = {};
ctx.f = w;
ctx.linfo = lam;
+ ctx.code = NULL;
+ ctx.world = 0;
ctx.sret = false;
ctx.spvals_ptr = NULL;
ctx.params = &jl_default_cgparams;
@@ -4126,7 +4149,12 @@ static Function *gen_jlcall_wrapper(jl_method_instance_t *lam, Function *f, bool
}
// Compile to LLVM IR, using a specialized signature if applicable.
-static std::unique_ptr<Module> emit_function(jl_method_instance_t *lam, jl_code_info_t *src, jl_llvm_functions_t *declarations, const jl_cgparams_t *params)
+static std::unique_ptr<Module> emit_function(
+ jl_method_instance_t *lam,
+ jl_code_info_t *src,
+ size_t world,
+ jl_llvm_functions_t *declarations,
+ const jl_cgparams_t *params)
{
jl_ptls_t ptls = jl_get_ptls_states();
assert(declarations && "Capturing declarations is always required");
@@ -4146,6 +4174,7 @@ static std::unique_ptr<Module> emit_function(jl_method_instance_t *lam, jl_code_
ctx.linfo = lam;
ctx.source = src;
ctx.code = code;
+ ctx.world = world;
ctx.name = jl_symbol_name(lam->def ? lam->def->name : anonymous_sym);
ctx.funcName = ctx.name;
ctx.vaSlot = -1;
@@ -4153,6 +4182,7 @@ static std::unique_ptr<Module> emit_function(jl_method_instance_t *lam, jl_code_
ctx.params = params;
ctx.spvals_ptr = NULL;
ctx.nargs = lam->def ? lam->def->nargs : 0;
+ bool toplevel = lam->def == NULL;
// step 2. process var-info lists to see what vars need boxing
int n_ssavalues = jl_is_long(src->ssavaluetypes) ? jl_unbox_long(src->ssavaluetypes) : jl_array_len(src->ssavaluetypes);
@@ -4570,6 +4600,11 @@ static std::unique_ptr<Module> emit_function(jl_method_instance_t *lam, jl_code_
// step 7. set up GC frame
allocate_gc_frame(b0, &ctx);
+ Value *last_age = NULL;
+ if (toplevel) {
+ emit_last_age_field(&ctx);
+ last_age = tbaa_decorate(tbaa_gcframe, builder.CreateLoad(ctx.world_age_field));
+ }
// step 8. allocate local variables slots
// must be in the first basic block for the llvm mem2reg pass to work
@@ -5093,6 +5128,8 @@ static std::unique_ptr<Module> emit_function(jl_method_instance_t *lam, jl_code_
}
if (do_malloc_log(props.in_user_code) && props.line != -1)
mallocVisitLine(props.file, props.line);
+ if (toplevel)
+ builder.CreateStore(last_age, ctx.world_age_field);
if (type_is_ghost(retty) || ctx.sret)
builder.CreateRetVoid();
else
@@ -5680,6 +5717,7 @@ static void init_julia_llvm_env(Module *m)
builtin_func_map[jl_f_isa] = jlcall_func_to_llvm("jl_f_isa", &jl_f_isa, m);
builtin_func_map[jl_f_typeassert] = jlcall_func_to_llvm("jl_f_typeassert", &jl_f_typeassert, m);
builtin_func_map[jl_f__apply] = jlcall_func_to_llvm("jl_f__apply", &jl_f__apply, m);
+ builtin_func_map[jl_f__apply_pure] = jlcall_func_to_llvm("jl_f__apply_pure", &jl_f__apply_pure, m);
builtin_func_map[jl_f_throw] = jlcall_func_to_llvm("jl_f_throw", &jl_f_throw, m);
builtin_func_map[jl_f_tuple] = jlcall_func_to_llvm("jl_f_tuple", &jl_f_tuple, m);
builtin_func_map[jl_f_svec] = jlcall_func_to_llvm("jl_f_svec", &jl_f_svec, m);
@@ -5958,6 +5996,13 @@ static void init_julia_llvm_env(Module *m)
except_enter_func->addFnAttr(Attribute::ReturnsTwice);
add_named_global(except_enter_func, (void*)NULL, /*dllimport*/false);
+ jlgetworld_func =
+ Function::Create(FunctionType::get(T_size, ArrayRef<Type*>(), false),
+ Function::ExternalLinkage,
+ "jl_get_world_counter", m);
+ jlgetworld_func->addFnAttr(Attribute::ReadOnly);
+ add_named_global(jlgetworld_func, jl_get_world_counter);
+
// set up optimization passes
#if JL_LLVM_VERSION >= 30700
// No DataLayout pass needed anymore.
diff --git a/src/dump.c b/src/dump.c
index 1acde727d2760..32f1c5c91c766 100644
--- a/src/dump.c
+++ b/src/dump.c
@@ -73,11 +73,11 @@ static htable_t fptr_to_id;
static const jl_fptr_t id_to_fptrs[] = {
NULL, NULL,
jl_f_throw, jl_f_is, jl_f_typeof, jl_f_issubtype, jl_f_isa,
- jl_f_typeassert, jl_f__apply, jl_f_isdefined, jl_f_tuple, jl_f_svec,
+ jl_f_typeassert, jl_f__apply, jl_f__apply_pure, jl_f_isdefined,
+ jl_f_tuple, jl_f_svec, jl_f_intrinsic_call,
jl_f_getfield, jl_f_setfield, jl_f_fieldtype, jl_f_nfields,
jl_f_arrayref, jl_f_arrayset, jl_f_arraysize, jl_f_apply_type,
jl_f_applicable, jl_f_invoke, jl_unprotect_stack, jl_f_sizeof, jl_f__expr,
- jl_f_intrinsic_call,
NULL };
static const intptr_t LongSymbol_tag = 23;
@@ -748,12 +748,14 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v)
arraylist_push(&reinit_list, (void*)pos);
arraylist_push(&reinit_list, (void*)1);
}
- if (s->mode == MODE_MODULE && jl_is_module(v)) {
- jl_module_t *m = (jl_module_t*)v;
- if (module_in_worklist(m) && !module_in_worklist(m->parent)) {
- // will need to reinsert this into parent bindings, later (in case of any errors during reinsert)
- arraylist_push(&reinit_list, (void*)pos);
- arraylist_push(&reinit_list, (void*)2);
+ if (s->mode == MODE_MODULE) {
+ if (jl_is_module(v)) {
+ jl_module_t *m = (jl_module_t*)v;
+ if (module_in_worklist(m) && !module_in_worklist(m->parent)) {
+ // will need to reinsert this into parent bindings, later (in case of any errors during reinsert)
+ arraylist_push(&reinit_list, (void*)pos);
+ arraylist_push(&reinit_list, (void*)2);
+ }
}
}
if (s->mode == MODE_MODULE) {
@@ -898,6 +900,13 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v)
write_int8(s->s, m->isstaged);
jl_serialize_value(s, (jl_value_t*)m->file);
write_int32(s->s, m->line);
+ if (s->mode != MODE_MODULE) {
+ write_int32(s->s, m->min_world);
+ write_int32(s->s, m->max_world);
+ }
+ else {
+ assert(m->max_world == ~(size_t)0 && "method replacement cannot be handled by incremental serializer");
+ }
jl_serialize_value(s, (jl_value_t*)m->tvars);
if (external_mt)
jl_serialize_value(s, jl_nothing);
@@ -917,10 +926,20 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v)
else if (jl_is_method_instance(v)) {
writetag(s->s, jl_method_instance_type);
jl_method_instance_t *li = (jl_method_instance_t*)v;
- int external = 0;
+ int internal = 0;
if (s->mode == MODE_MODULE) {
- external = li->def && !module_in_worklist(li->def->module);
- if (external) {
+ if (li->max_world == 0 && li->min_world == 0) {
+ internal = 1; // not world-tracked
+ }
+ else if (!li->def || module_in_worklist(li->def->module)) {
+ if (li->max_world == ~(size_t)0) {
+ internal = 2; // update world on deserialization
+ }
+ else {
+ internal = 3; // garbage object :(
+ }
+ }
+ if (!internal) {
// also flag this in the backref table as special
uintptr_t *bp = (uintptr_t*)ptrhash_bp(&backref_table, v);
assert(*bp != (uintptr_t)HT_NOTFOUND);
@@ -928,19 +947,24 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v)
}
}
jl_serialize_value(s, (jl_value_t*)li->specTypes);
- if (s->mode == MODE_MODULE && external)
+ if (s->mode == MODE_MODULE && !internal)
jl_serialize_value(s, (jl_value_t*)li->def->sig);
else
jl_serialize_value(s, (jl_value_t*)li->def);
if (s->mode == MODE_MODULE) {
- write_uint8(s->s, external);
- if (external)
+ write_uint8(s->s, internal);
+ if (!internal)
return;
}
jl_serialize_value(s, li->inferred);
jl_serialize_value(s, li->inferred_const);
jl_serialize_value(s, li->rettype);
jl_serialize_value(s, (jl_value_t*)li->sparam_vals);
+ jl_serialize_value(s, (jl_value_t*)li->backedges);
+ if (s->mode != MODE_MODULE) {
+ write_int32(s->s, li->min_world);
+ write_int32(s->s, li->max_world);
+ }
if (li->def) {
uint16_t id = jl_fptr_id((void*)(uintptr_t)li->fptr);
if (li->jlcall_api == 2) {
@@ -1066,38 +1090,66 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v)
}
}
-struct jl_serialize_methcache_from_mod_env {
- jl_serializer_state *s;
- jl_sym_t *name;
- jl_module_t *mod;
-};
+static void jl_serialize_missing_backedges_to_mod(jl_serializer_state *s, jl_methtable_t *mt)
+{
+ jl_array_t *backedges = mt->backedges;
+ if (backedges) {
+ size_t i, l = jl_array_len(backedges);
+ for (i = 1; i < l; i += 2) {
+ jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(backedges, i);
+ if (caller->max_world == ~(size_t)0 && module_in_worklist(caller->def->module)) {
+ jl_serialize_value(s, caller);
+ jl_serialize_value(s, jl_array_ptr_ref(backedges, i - 1));
+ }
+ }
+ }
+}
+
+static int jl_serialize_backedges_to_mod(jl_typemap_entry_t *ml, void *closure)
+{
+ jl_serializer_state *s = (jl_serializer_state*)closure;
+ jl_method_instance_t *callee = ml->func.linfo;
+ jl_array_t *backedges = callee->backedges;
+ if (backedges) {
+ assert(callee->max_world == ~(size_t)0);
+ size_t i, l = jl_array_len(backedges);
+ for (i = 0; i < l; i++) {
+ jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(backedges, i);
+ if (caller->max_world == ~(size_t)0 && module_in_worklist(caller->def->module)) {
+ jl_serialize_value(s, caller);
+ jl_serialize_value(s, callee);
+ }
+ }
+ }
+ return 1;
+}
static int jl_serialize_methcache_from_mod(jl_typemap_entry_t *ml, void *closure)
{
- struct jl_serialize_methcache_from_mod_env *env = (struct jl_serialize_methcache_from_mod_env*)closure;
- if (module_in_worklist(ml->func.method->module)) {
- jl_serialize_value(env->s, ml->func.method);
- jl_serialize_value(env->s, ml->simplesig);
+ jl_serializer_state *s = (jl_serializer_state*)closure;
+ jl_method_t *m = ml->func.method;
+ if (module_in_worklist(m->module)) {
+ jl_serialize_value(s, m);
+ jl_serialize_value(s, ml->simplesig);
+ }
+ else {
+ jl_typemap_visitor(m->specializations, jl_serialize_backedges_to_mod, closure);
}
return 1;
}
static void jl_serialize_methtable_from_mod(jl_serializer_state *s, jl_typename_t *tn)
{
- struct jl_serialize_methcache_from_mod_env env;
- env.s = s;
- env.mod = tn->module;
- env.name = tn->name;
- assert(tn->module);
- jl_typemap_visitor(tn->mt->defs, jl_serialize_methcache_from_mod, &env);
+ jl_typemap_visitor(tn->mt->defs, jl_serialize_methcache_from_mod, (void*)s);
}
static void jl_serialize_lambdas_from_mod(jl_serializer_state *s, jl_module_t *m)
{
- if (module_in_worklist(m)) return;
+ if (module_in_worklist(m))
+ return;
size_t i;
void **table = m->bindings.table;
- for(i=1; i < m->bindings.size; i+=2) {
+ for (i = 1; i < m->bindings.size; i += 2) {
if (table[i] != HT_NOTFOUND) {
jl_binding_t *b = (jl_binding_t*)table[i];
if (b->owner == m && b->value && b->constp) {
@@ -1105,8 +1157,11 @@ static void jl_serialize_lambdas_from_mod(jl_serializer_state *s, jl_module_t *m
jl_typename_t *tn = ((jl_datatype_t*)b->value)->name;
if (tn->module == m && tn->name == b->name) {
jl_methtable_t *mt = tn->mt;
- if (mt != NULL && (jl_value_t*)mt != jl_nothing && (mt != jl_type_type_mt || tn == jl_type_type->name)) {
+ if (mt != NULL &&
+ (jl_value_t*)mt != jl_nothing &&
+ (mt != jl_type_type_mt || tn == jl_type_type->name)) {
jl_serialize_methtable_from_mod(s, tn);
+ jl_serialize_missing_backedges_to_mod(s, mt);
}
}
}
@@ -1128,7 +1183,7 @@ static void write_mod_list(ios_t *s)
jl_module_t *m = jl_main_module;
size_t i;
void **table = m->bindings.table;
- for(i=1; i < m->bindings.size; i+=2) {
+ for (i = 1; i < m->bindings.size; i += 2) {
if (table[i] != HT_NOTFOUND) {
jl_binding_t *b = (jl_binding_t*)table[i];
if (b->owner == m &&
@@ -1197,8 +1252,11 @@ static void write_dependency_list(ios_t *s)
static jl_value_t *unique_func = NULL;
if (!unique_func)
unique_func = jl_get_global(jl_base_module, jl_symbol("unique"));
- jl_value_t *uniqargs[2] = {unique_func,(jl_value_t*)deps};
+ jl_value_t *uniqargs[2] = {unique_func, (jl_value_t*)deps};
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_world_counter;
jl_array_t *udeps = deps && unique_func ? (jl_array_t*)jl_apply(uniqargs, 2) : NULL;
+ jl_get_ptls_states()->world_age = last_age;
JL_GC_PUSH1(&udeps);
if (udeps) {
@@ -1539,6 +1597,14 @@ static jl_value_t *jl_deserialize_value_method(jl_serializer_state *s, jl_value_
m->isstaged = read_int8(s->s);
m->file = (jl_sym_t*)jl_deserialize_value(s, NULL);
m->line = read_int32(s->s);
+ if (s->mode != MODE_MODULE) {
+ m->min_world = read_int32(s->s);
+ m->max_world = read_int32(s->s);
+ }
+ else {
+ m->min_world = jl_world_counter;
+ m->max_world = ~(size_t)0;
+ }
m->tvars = (jl_svec_t*)jl_deserialize_value(s, (jl_value_t**)&m->tvars);
jl_gc_wb(m, m->tvars);
m->ambig = jl_deserialize_value(s, (jl_value_t**)&m->ambig);
@@ -1585,9 +1651,10 @@ static jl_value_t *jl_deserialize_value_method_instance(jl_serializer_state *s,
if (li->def)
jl_gc_wb(li, li->def);
+ int internal = 0;
if (s->mode == MODE_MODULE) {
- int external = read_uint8(s->s);
- if (external) {
+ internal = read_uint8(s->s);
+ if (!internal) {
assert(loc != NULL && loc != HT_NOTFOUND);
arraylist_push(&flagref_list, loc);
arraylist_push(&flagref_list, (void*)pos);
@@ -1604,7 +1671,30 @@ static jl_value_t *jl_deserialize_value_method_instance(jl_serializer_state *s,
jl_gc_wb(li, li->rettype);
li->sparam_vals = (jl_svec_t*)jl_deserialize_value(s, (jl_value_t**)&li->sparam_vals);
jl_gc_wb(li, li->sparam_vals);
+ li->backedges = (jl_array_t*)jl_deserialize_value(s, (jl_value_t**)&li->backedges);
+ if (li->backedges)
+ jl_gc_wb(li, li->backedges);
li->unspecialized_ducttape = NULL;
+ if (s->mode != MODE_MODULE) {
+ li->min_world = read_int32(s->s);
+ li->max_world = read_int32(s->s);
+ }
+ else if (internal == 1) {
+ li->min_world = 0;
+ li->max_world = 0;
+ }
+ else if (internal == 2) {
+ li->min_world = jl_world_counter;
+ li->max_world = ~(size_t)0;
+ }
+ else if (internal == 3) {
+ li->min_world = 1;
+ li->max_world = 0;
+ }
+ else {
+ assert(0 && "corrupt deserialization state");
+ abort();
+ }
li->functionObjectsDecls.functionObject = NULL;
li->functionObjectsDecls.specFunctionObject = NULL;
li->inInference = 0;
@@ -1774,6 +1864,16 @@ static jl_value_t *jl_deserialize_value_any(jl_serializer_state *s, jl_value_t *
tn->cache = jl_emptysvec; // the cache is refilled later (tag 5)
tn->linearcache = jl_emptysvec; // the cache is refilled later (tag 5)
}
+ if (dt == jl_typemap_entry_type) {
+ if (((jl_typemap_entry_t*)v)->max_world == ~(size_t)0) {
+ // update world validity to reflect current state of the counter
+ ((jl_typemap_entry_t*)v)->min_world = jl_world_counter;
+ }
+ else {
+ // garbage entry - delete it :(
+ ((jl_typemap_entry_t*)v)->min_world = ((jl_typemap_entry_t*)v)->max_world - 1;
+ }
+ }
}
}
return v;
@@ -1867,9 +1967,15 @@ static jl_value_t *jl_deserialize_value_(jl_serializer_state *s, jl_value_t *vta
typedef struct _linkedlist_t {
struct _linkedlist_t *next;
- struct {
- jl_method_t *meth;
- jl_tupletype_t *simpletype;
+ union {
+ struct {
+ jl_method_t *meth;
+ jl_tupletype_t *simpletype;
+ };
+ struct {
+ jl_method_instance_t *caller;
+ jl_value_t *callee;
+ };
} def[100];
size_t count;
} linkedlist_t;
@@ -1902,15 +2008,41 @@ static void jl_insert_methods(linkedlist_t *list)
while (list) {
size_t i;
for (i = 0; i < list->count; i++) {
- jl_method_t *meth = list->def[i].meth;
- jl_datatype_t *gf = jl_first_argument_datatype((jl_value_t*)meth->sig);
- assert(jl_is_datatype(gf) && gf->name->mt);
- jl_method_table_insert(gf->name->mt, meth, list->def[i].simpletype);
+ if (jl_is_method(list->def[i].meth)) {
+ jl_method_t *meth = list->def[i].meth;
+ jl_datatype_t *gf = jl_first_argument_datatype((jl_value_t*)meth->sig);
+ assert(jl_is_datatype(gf) && gf->name->mt);
+ jl_method_table_insert(gf->name->mt, meth, list->def[i].simpletype);
+ }
}
list = list->next;
}
}
+static void jl_insert_backedges(linkedlist_t *list)
+{
+ while (list) {
+ size_t i;
+ for (i = 0; i < list->count; i++) {
+ if (!jl_is_method(list->def[i].meth)) {
+ jl_method_instance_t *caller = list->def[i].caller;
+ assert(jl_is_method_instance(caller));
+ jl_value_t *callee = list->def[i].callee;
+ if (jl_is_method_instance(callee)) {
+ jl_method_instance_add_backedge((jl_method_instance_t*)callee, caller);
+ }
+ else {
+ jl_datatype_t *gf = jl_first_argument_datatype(callee);
+ assert(jl_is_datatype(gf) && gf->name->mt);
+ jl_method_table_add_backedge(gf->name->mt, callee, (jl_value_t*)caller);
+ }
+ }
+ }
+ list = list->next;
+ }
+}
+
+
static void free_linkedlist(linkedlist_t *list)
{
while (list) {
@@ -2151,6 +2283,7 @@ static void jl_save_system_image_to_stream(ios_t *f)
jl_serialize_value(&s, jl_main_module);
jl_serialize_value(&s, jl_top_module);
jl_serialize_value(&s, jl_typeinf_func);
+ write_uint64(f, jl_typeinf_world);
// deserialize method tables of builtin types
jl_serialize_value(&s, jl_type_type->name->mt);
@@ -2179,6 +2312,7 @@ static void jl_save_system_image_to_stream(ios_t *f)
write_int32(f, jl_get_t_uid_ctr());
write_int32(f, jl_get_gs_ctr());
+ write_int32(f, jl_world_counter);
jl_finalize_serializer(&s); // done with f and s
htable_reset(&backref_table, 0);
@@ -2207,7 +2341,6 @@ JL_DLLEXPORT ios_t *jl_create_system_image(void)
return f;
}
-extern jl_function_t *jl_typeinf_func;
extern int jl_boot_file_loaded;
extern void jl_get_builtins(void);
extern void jl_get_builtin_hooks(void);
@@ -2254,8 +2387,9 @@ static void jl_restore_system_image_from_stream(ios_t *f)
jl_main_module = (jl_module_t*)jl_deserialize_value(&s, NULL);
jl_top_module = (jl_module_t*)jl_deserialize_value(&s, NULL);
jl_internal_main_module = jl_main_module;
-
jl_typeinf_func = (jl_function_t*)jl_deserialize_value(&s, NULL);
+ jl_typeinf_world = read_uint64(f);
+
jl_type_type_mt = (jl_methtable_t*)jl_deserialize_value(&s, NULL);
jl_type_type->name->mt = jl_type_type_mt;
jl_typector_type->name->mt = jl_type_type_mt;
@@ -2289,6 +2423,7 @@ static void jl_restore_system_image_from_stream(ios_t *f)
int uid_ctr = read_int32(f);
int gs_ctr = read_int32(f);
+ jl_world_counter = read_int32(f);
jl_module_init_order = jl_finalize_deserializer(&s, NULL); // done with s and f
jl_set_t_uid_ctr(uid_ctr);
@@ -2631,7 +2766,7 @@ jl_method_t *jl_recache_method(jl_method_t *m, size_t start)
jl_datatype_t *ftype = jl_first_argument_datatype((jl_value_t*)sig);
jl_methtable_t *mt = ftype->name->mt;
jl_set_typeof(m, (void*)(intptr_t)0x30); // invalidate the old value to help catch errors
- jl_method_t *_new = (jl_method_t*)jl_methtable_lookup(mt, sig);
+ jl_method_t *_new = (jl_method_t*)jl_methtable_lookup(mt, sig, /*TODO*/jl_world_counter);
assert(_new && jl_is_method(_new));
jl_update_backref_list((jl_value_t*)m, (jl_value_t*)_new, start);
return _new;
@@ -2643,7 +2778,7 @@ jl_method_instance_t *jl_recache_method_instance(jl_method_instance_t *li, size_
jl_datatype_t *sig = (jl_datatype_t*)li->def;
jl_datatype_t *ftype = jl_first_argument_datatype((jl_value_t*)sig);
jl_methtable_t *mt = ftype->name->mt;
- jl_method_t *m = (jl_method_t*)jl_methtable_lookup(mt, sig);
+ jl_method_t *m = (jl_method_t*)jl_methtable_lookup(mt, sig, /*TODO*/jl_world_counter);
assert(m && jl_is_method(m));
jl_datatype_t *argtypes = li->specTypes;
@@ -2653,7 +2788,7 @@ jl_method_instance_t *jl_recache_method_instance(jl_method_instance_t *li, size_
//assert(ti != jl_bottom_type); (void)ti;
if (ti == jl_bottom_type)
env = jl_emptysvec; // the intersection may fail now if the type system had made an incorrect subtype env in the past
- jl_method_instance_t *_new = jl_specializations_get_linfo(m, argtypes, env);
+ jl_method_instance_t *_new = jl_specializations_get_linfo(m, argtypes, env, /*TODO*/jl_world_counter);
jl_update_backref_list((jl_value_t*)li, (jl_value_t*)_new, start);
return _new;
}
@@ -2721,6 +2856,7 @@ static jl_value_t *_jl_restore_incremental(ios_t *f)
arraylist_new(&flagref_list, 0);
int en = jl_gc_enable(0);
+ ++jl_world_counter; // reserve a world age for the deserialization
jl_serializer_state s = {
f, MODE_MODULE,
NULL, NULL,
@@ -2742,9 +2878,10 @@ static jl_value_t *_jl_restore_incremental(ios_t *f)
// at this point, the AST is fully reconstructed, but still completely disconnected
// now all of the interconnects will be created
jl_recache_types(); // make all of the types identities correct
- jl_recache_other(); // make all of the other objects identities correct
- init_order = jl_finalize_deserializer(&s, tracee_list); // done with f and s
- jl_insert_methods(&external_methods); // hook up methods of external generic functions
+ init_order = jl_finalize_deserializer(&s, tracee_list); // done with f and s (needs to be after recache types)
+ jl_insert_methods(&external_methods); // hook up methods of external generic functions (needs to be after recache types)
+ jl_recache_other(); // make all of the other objects identities correct (needs to be after insert methods)
+ jl_insert_backedges(&external_methods); // restore external backedges (needs to be after recache other)
free_linkedlist(external_methods.next);
serializer_worklist = NULL;
diff --git a/src/gc.c b/src/gc.c
index cd35e9d6419a0..83a22e9394466 100644
--- a/src/gc.c
+++ b/src/gc.c
@@ -105,7 +105,10 @@ static void run_finalizer(jl_ptls_t ptls, jl_value_t *o, jl_value_t *ff)
assert(!jl_typeis(ff, jl_voidpointer_type));
jl_value_t *args[2] = {ff,o};
JL_TRY {
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_world_counter;
jl_apply(args, 2);
+ jl_get_ptls_states()->world_age = last_age;
}
JL_CATCH {
jl_printf(JL_STDERR, "error in running finalizer: ");
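
The finalizer hunk above is one instance of a pattern this patch applies to every runtime-initiated callback (the same save/raise/restore appears later in jl_atexit_hook, jl_uv_closeHandle, the interpreter entry points, and the jl_call* embedding API): remember the task's current world_age, run the callback at the latest world so it can see newly defined methods, then restore. A minimal sketch of the idiom with a hypothetical counter and thread-local age, not the actual jl_get_ptls_states() interface:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the global counter and the per-task age. */
static size_t world_counter = 42;
static _Thread_local size_t world_age = 7;

static void callback(void)
{
    /* Runs with the newest methods visible. */
    printf("callback sees world %zu\n", world_age);
}

/* Save/raise/restore idiom used around runtime-initiated callbacks. */
static void run_callback_in_latest_world(void (*cb)(void))
{
    size_t last_age = world_age;
    world_age = world_counter;   /* raise to the current world */
    cb();
    world_age = last_age;        /* restore the caller's frozen world */
}

int main(void)
{
    run_callback_in_latest_world(callback);
    printf("caller is back in world %zu\n", world_age);
    return 0;
}
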
@@ -1540,6 +1543,7 @@ void visit_mark_stack(jl_ptls_t ptls)
extern jl_array_t *jl_module_init_order;
extern jl_typemap_entry_t *call_cache[N_CALL_CACHE];
+extern jl_array_t *jl_all_methods;
// mark the initial root set
void pre_mark(jl_ptls_t ptls)
@@ -1572,6 +1576,8 @@ void pre_mark(jl_ptls_t ptls)
for (i = 0; i < N_CALL_CACHE; i++)
if (call_cache[i])
gc_push_root(ptls, call_cache[i], 0);
+ if (jl_all_methods != NULL)
+ gc_push_root(ptls, jl_all_methods, 0);
jl_mark_box_caches(ptls);
//gc_push_root(ptls, jl_unprotect_stack_func, 0);
diff --git a/src/gf.c b/src/gf.c
index adca9a84774cc..436a73994a9fe 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -24,9 +24,32 @@
extern "C" {
#endif
+size_t jl_world_counter = 1;
+JL_DLLEXPORT size_t jl_get_world_counter(void)
+{
+ return jl_world_counter;
+}
+
+JL_DLLEXPORT size_t jl_get_tls_world_age(void)
+{
+ return jl_get_ptls_states()->world_age;
+}
+
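
jl_world_counter and the per-task world_age introduced here are the heart of the change: every method, specialization, and cache entry now carries a [min_world, max_world] validity interval, and a lookup performed at age N only sees entries whose interval contains N. A minimal model of that visibility rule (entry_t and visible_in are illustrative names, not runtime API):

#include <stddef.h>
#include <stdio.h>

/* Illustrative model: an entry is usable only inside its world interval. */
typedef struct {
    const char *name;
    size_t min_world;
    size_t max_world;   /* ~(size_t)0 means "still current" */
} entry_t;

static int visible_in(const entry_t *e, size_t world)
{
    return world >= e->min_world && world <= e->max_world;
}

int main(void)
{
    entry_t old_def = { "f(::Int) v1", 1, 9 };            /* replaced in world 10 */
    entry_t new_def = { "f(::Int) v2", 10, ~(size_t)0 };  /* current definition */

    size_t frozen_task_age = 8;   /* a task that started before the redefinition */
    size_t latest_world = 10;

    printf("age 8 sees v1: %d, v2: %d\n",
           visible_in(&old_def, frozen_task_age), visible_in(&new_def, frozen_task_age));
    printf("age 10 sees v1: %d, v2: %d\n",
           visible_in(&old_def, latest_world), visible_in(&new_def, latest_world));
    return 0;
}
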
JL_DLLEXPORT jl_value_t *jl_invoke(jl_method_instance_t *meth, jl_value_t **args, uint32_t nargs)
{
- return jl_call_method_internal(meth, args, nargs);
+ if (meth->jlcall_api) {
+ return jl_call_method_internal(meth, args, nargs);
+ }
+ else {
+ // if this hasn't been inferred (compiled) yet,
+ // inferring it might not be able to handle the world range
+ // so we just do a generic apply here
+ // because that might actually be faster
+ // since it can go through the unrolled caches for this world
+ // and if inference is successful, this meth would get updated anyways,
+ // and we'll get the fast path here next time
+ return jl_apply(args, nargs);
+ }
}
/// ----- Handling for Julia callbacks ----- ///
@@ -117,34 +140,55 @@ static int8_t jl_cachearg_offset(jl_methtable_t *mt)
/// ----- Insertion logic for special entries ----- ///
// get or create the MethodInstance for a specialization
-JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m, jl_tupletype_t *type, jl_svec_t *sparams)
+JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m, jl_tupletype_t *type, jl_svec_t *sparams, size_t world)
{
+ assert(world >= m->min_world && world <= m->max_world && "typemap lookup is corrupted");
JL_LOCK(&m->writelock);
- jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(m->specializations, type, NULL, 2, /*subtype*/0, /*offs*/0);
+ jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(
+ m->specializations, type, NULL, 2, /*subtype*/0, /*offs*/0, world);
if (sf && jl_is_method_instance(sf->func.value)) {
+ jl_method_instance_t *linfo = (jl_method_instance_t*)sf->func.value;
+ assert(linfo->min_world <= sf->min_world && linfo->max_world >= sf->max_world);
JL_UNLOCK(&m->writelock);
- return (jl_method_instance_t*)sf->func.value;
+ return linfo;
}
jl_method_instance_t *li = jl_get_specialized(m, type, sparams);
JL_GC_PUSH1(&li);
// TODO: fuse lookup and insert steps
- jl_typemap_insert(&m->specializations, (jl_value_t*)m, type, jl_emptysvec, NULL, jl_emptysvec, (jl_value_t*)li, 0, &tfunc_cache, NULL);
+ // pick an initial world that is likely to be valid both before and after inference
+ if (world > jl_world_counter) {
+ li->min_world = jl_world_counter;
+ }
+ else {
+ li->min_world = world;
+ }
+ if (world == jl_world_counter) {
+ assert(m->max_world == ~(size_t)0 && "method validity shouldn't be scheduled to terminate at a fixed future age");
+ li->max_world = m->max_world;
+ }
+ else {
+ li->max_world = world;
+ }
+ jl_typemap_insert(&m->specializations, (jl_value_t*)m, type, jl_emptysvec, NULL, jl_emptysvec, (jl_value_t*)li, 0, &tfunc_cache,
+ li->min_world, li->max_world, NULL);
JL_UNLOCK(&m->writelock);
JL_GC_POP();
return li;
}
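
The branch above chooses the starting interval for a freshly created specialization: min_world is clamped to the current counter, and max_world is left open-ended only when the lookup happens at the current world; a lookup pinned to a past world gets an interval ending at that world until inference widens it. A sketch of just that decision, under hypothetical names:

#include <stddef.h>
#include <stdio.h>

typedef struct { size_t min_world, max_world; } interval_t;

/* Mirrors the initial-world choice for a new specialization:
 * `world` is the lookup world, `counter` the current world counter,
 * `def_max` the defining method's max_world (open-ended while current). */
static interval_t initial_interval(size_t world, size_t counter, size_t def_max)
{
    interval_t iv;
    iv.min_world = (world > counter) ? counter : world;
    iv.max_world = (world == counter) ? def_max : world;
    return iv;
}

int main(void)
{
    size_t open = ~(size_t)0;
    interval_t now  = initial_interval(20, 20, open);  /* current world: open-ended */
    interval_t past = initial_interval(15, 20, open);  /* past world: pinned to 15 */
    printf("current lookup: [%zu, %zu]\n", now.min_world, now.max_world);
    printf("past lookup:    [%zu, %zu]\n", past.min_world, past.max_world);
    return 0;
}
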
-JL_DLLEXPORT jl_value_t *jl_specializations_lookup(jl_method_t *m, jl_tupletype_t *type)
+JL_DLLEXPORT jl_value_t *jl_specializations_lookup(jl_method_t *m, jl_tupletype_t *type, size_t world)
{
- jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(m->specializations, type, NULL, 2, /*subtype*/0, /*offs*/0);
+ jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(
+ m->specializations, type, NULL, 2, /*subtype*/0, /*offs*/0, world);
if (!sf)
return jl_nothing;
return sf->func.value;
}
-JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt, jl_tupletype_t *type)
+JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt, jl_tupletype_t *type, size_t world)
{
- jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(mt->defs, type, NULL, 1, /*subtype*/0, /*offs*/0);
+ jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(
+ mt->defs, type, NULL, 1, /*subtype*/0, /*offs*/0, world);
if (!sf)
return jl_nothing;
return sf->func.value;
@@ -166,6 +210,8 @@ void jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_t fptr)
li->fptr = fptr;
li->jlcall_api = 1;
li->specTypes = jl_anytuple_type;
+ li->min_world = 1;
+ li->max_world = ~(size_t)0;
li->def = jl_new_method_uninit();
li->def->name = sname;
@@ -177,53 +223,187 @@ void jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_t fptr)
li->def->sparam_syms = jl_emptysvec;
jl_methtable_t *mt = dt->name->mt;
- jl_typemap_insert(&mt->cache, (jl_value_t*)mt, jl_anytuple_type, jl_emptysvec, NULL, jl_emptysvec, (jl_value_t*)li, 0, &lambda_cache, NULL);
+ jl_typemap_insert(&mt->cache, (jl_value_t*)mt, jl_anytuple_type, jl_emptysvec, NULL, jl_emptysvec, (jl_value_t*)li, 0, &lambda_cache, 1, ~(size_t)0, NULL);
}
// run type inference on lambda "li" for given argument types.
// returns the inferred source, and may cache the result in li
+// if successful, also updates the li argument to describe the validity of this src
// if inference doesn't occur (or can't finish), returns NULL instead
-jl_code_info_t *jl_type_infer(jl_method_instance_t *li, int force)
+jl_code_info_t *jl_type_infer(jl_method_instance_t **pli, size_t world, int force)
{
JL_TIMING(INFERENCE);
if (jl_typeinf_func == NULL)
return NULL;
jl_code_info_t *src = NULL;
#ifdef ENABLE_INFERENCE
+ jl_method_instance_t *li = *pli;
jl_module_t *mod = NULL;
if (li->def != NULL)
mod = li->def->module;
static int inInference = 0;
int lastIn = inInference;
+ size_t last_age = jl_get_ptls_states()->world_age;
inInference = 1;
if (force ||
- (mod != jl_gf_mtable(jl_typeinf_func)->module &&
+ (last_age != jl_typeinf_world &&
+ mod != jl_gf_mtable(jl_typeinf_func)->module &&
(mod != jl_core_module || !lastIn))) { // avoid any potential recursion in calling jl_typeinf_func on itself
- assert(li->inInference == 0);
- jl_value_t *fargs[2];
+ assert(li->inInference == 0 && "unexpectedly asked to infer a method that is already being inferred");
+ jl_value_t **fargs;
+ JL_GC_PUSHARGS(fargs, 3);
fargs[0] = (jl_value_t*)jl_typeinf_func;
fargs[1] = (jl_value_t*)li;
+ fargs[2] = jl_box_ulong(world);
#ifdef TRACE_INFERENCE
jl_printf(JL_STDERR,"inference on ");
jl_static_show_func_sig(JL_STDERR, (jl_value_t*)li->specTypes);
jl_printf(JL_STDERR, "\n");
#endif
- src = (jl_code_info_t *)jl_apply(fargs, 2);
- if (src == (void*)jl_nothing)
- src = NULL;
- assert(li->def || li->inInference == 0); // if this is toplevel expr, make sure inference finished
+ jl_get_ptls_states()->world_age = jl_typeinf_world;
+ jl_svec_t *linfo_src_rettype = (jl_svec_t*)jl_apply(fargs, 3);
+ jl_get_ptls_states()->world_age = last_age;
+ assert((li->def || li->inInference == 0) && "inference failed on a toplevel expr");
+ if (jl_is_svec(linfo_src_rettype) && jl_svec_len(linfo_src_rettype) == 3 &&
+ jl_is_method_instance(jl_svecref(linfo_src_rettype, 0)) &&
+ jl_is_code_info(jl_svecref(linfo_src_rettype, 1))) {
+ *pli = (jl_method_instance_t*)jl_svecref(linfo_src_rettype, 0);
+ src = (jl_code_info_t*)jl_svecref(linfo_src_rettype, 1);
+ }
+ JL_GC_POP();
}
inInference = lastIn;
#endif
return src;
}
-JL_DLLEXPORT void jl_set_lambda_rettype(jl_method_instance_t *li, jl_value_t *rettype, int32_t const_flags, jl_value_t *inferred_const, jl_value_t *inferred)
+
+static int jl_is_rettype_inferred(jl_method_instance_t *li)
+{
+ if (!li->inferred)
+ return 0;
+ if (jl_is_code_info(li->inferred) && !((jl_code_info_t*)li->inferred)->inferred)
+ return 0;
+ return 1;
+}
+
+
+struct set_world {
+ jl_method_instance_t *replaced;
+ size_t world;
+};
+static int set_max_world2(jl_typemap_entry_t *entry, void *closure0)
+{
+ struct set_world *closure = (struct set_world*)closure0;
+ // entry->max_world should be <= closure->replaced->max_world and >= closure->world
+ if (entry->func.linfo == closure->replaced) {
+ entry->max_world = closure->world;
+ }
+ return 1;
+}
+static int set_min_world2(jl_typemap_entry_t *entry, void *closure0)
{
+ struct set_world *closure = (struct set_world*)closure0;
+ // entry->min_world should be >= closure->replaced->min_world and >= closure->world
+ if (entry->func.linfo == closure->replaced) {
+ entry->min_world = closure->world;
+ }
+ return 1;
+}
+static void update_world_bound(jl_method_instance_t *replaced, jl_typemap_visitor_fptr fptr, size_t world)
+{
+ struct set_world update;
+ update.replaced = replaced;
+ update.world = world;
+
+ jl_method_t *m = replaced->def;
+ // update the world-valid in the specializations caches
+ jl_typemap_visitor(m->specializations, fptr, (void*)&update);
+ // update the world-valid in the invoke cache
+ if (m->invokes.unknown != NULL)
+ jl_typemap_visitor(m->invokes, fptr, (void*)&update);
+ // update the world-valid in the gf cache
+ jl_datatype_t *gf = jl_first_argument_datatype((jl_value_t*)m->sig);
+ assert(jl_is_datatype(gf) && gf->name->mt && "method signature invalid?");
+ jl_typemap_visitor(gf->name->mt->cache, fptr, (void*)&update);
+}
+
+
+JL_DLLEXPORT jl_method_instance_t* jl_set_method_inferred(
+ jl_method_instance_t *li, jl_value_t *rettype,
+ jl_value_t *inferred_const, jl_value_t *inferred,
+ int32_t const_flags, size_t min_world, size_t max_world)
+{
+ JL_GC_PUSH1(&li);
+ assert(min_world <= max_world && "attempting to set invalid world constraints");
+ assert(li->inInference && "shouldn't be caching an inference result for a MethodInstance that wasn't being inferred");
+ if (min_world != li->min_world || max_world != li->max_world) {
+ if (li->def == NULL) {
+ // thunks don't have multiple references, so just update in-place
+ li->min_world = min_world;
+ li->max_world = max_world;
+ }
+ else {
+ JL_LOCK(&li->def->writelock);
+ assert(min_world >= li->def->min_world && max_world <= li->def->max_world);
+ int isinferred = jl_is_rettype_inferred(li);
+ if (!isinferred && li->min_world >= min_world && li->max_world <= max_world) {
+ // expand the current (uninferred) entry to cover the full inferred range
+ // only update the specializations though, since the method table may have other
+ // reasons for needing a narrower applicability range
+ struct set_world update;
+ update.replaced = li;
+ if (li->min_world != min_world) {
+ li->min_world = min_world;
+ update.world = min_world;
+ jl_typemap_visitor(li->def->specializations, set_min_world2, (void*)&update);
+ }
+ if (li->max_world != max_world) {
+ li->max_world = max_world;
+ update.world = max_world;
+ jl_typemap_visitor(li->def->specializations, set_max_world2, (void*)&update);
+ }
+ }
+ else {
+ // clip applicability of old method instance (uninferred or inferred)
+ // to make it easier to find the inferred method
+ // (even though the real applicability was unchanged)
+ // there are 6(!) regions here to consider + boundary conditions for each
+ if (li->max_world >= min_world && li->min_world <= max_world) {
+ // there is a non-zero overlap between [li->min, li->max] and [min, max]
+ // there are now 4 regions left to consider
+ // TODO: also take into account li->def->world range when computing preferred division
+ if (li->max_world > max_world) {
+ // prefer making it applicable to future ages,
+ // as those are more likely to be useful
+ update_world_bound(li, set_min_world2, max_world + 1);
+ }
+ else if (li->min_world < min_world) {
+ assert(min_world > 1 && "logic violation: min(li->min_world) == 1 (by construction), so min(min_world) == 2");
+ update_world_bound(li, set_max_world2, min_world - 1);
+ }
+ else {
+ // old inferred li is fully covered by new inference result, so just delete it
+ assert(isinferred);
+ update_world_bound(li, set_max_world2, li->min_world - 1);
+ }
+ }
+
+ // build a new entry to describe the new (inferred) applicability
+ li = jl_get_specialized(li->def, li->specTypes, li->sparam_vals);
+ li->min_world = min_world;
+ li->max_world = max_world;
+ jl_typemap_insert(&li->def->specializations, (jl_value_t*)li->def,
+ li->specTypes, jl_emptysvec, NULL, jl_emptysvec,
+ (jl_value_t*)li, 0, &tfunc_cache,
+ li->min_world, li->max_world, NULL);
+ }
+ JL_UNLOCK(&li->def->writelock);
+ }
+ }
+
// changing rettype changes the llvm signature,
// so clear all of the llvm state at the same time
- assert(li->inInference);
- assert(!li->inferred || li->functionObjectsDecls.functionObject == NULL); // protect against some double-infer dataflow mistakes
li->functionObjectsDecls.functionObject = NULL;
li->functionObjectsDecls.specFunctionObject = NULL;
li->rettype = rettype;
@@ -238,20 +418,14 @@ JL_DLLEXPORT void jl_set_lambda_rettype(jl_method_instance_t *li, jl_value_t *re
li->inferred_const = inferred_const;
jl_gc_wb(li, inferred_const);
}
+ JL_GC_POP();
+ return li;
}
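
The trickiest part of jl_set_method_inferred is the clipping above: when the newly inferred interval overlaps an existing entry, the old entry is trimmed so the two never claim the same world, preferring to keep the old entry valid for future ages. A worked sketch of the three overlap cases (the real code walks typemaps rather than mutating one struct, and clip_old_interval is an illustrative helper):

#include <stddef.h>
#include <stdio.h>

typedef struct { size_t min_world, max_world; } interval_t;

/* Clip `old` against a new inferred interval `neu`, mirroring the
 * overlapping cases handled in jl_set_method_inferred:
 *  - old extends past neu: keep its future tail (raise old.min)
 *  - old starts before neu: keep its past head (lower old.max)
 *  - old fully covered: make it empty (max < min). */
static void clip_old_interval(interval_t *old, interval_t neu)
{
    if (old->max_world < neu.min_world || old->min_world > neu.max_world)
        return;  /* no overlap, nothing to do */
    if (old->max_world > neu.max_world)
        old->min_world = neu.max_world + 1;
    else if (old->min_world < neu.min_world)
        old->max_world = neu.min_world - 1;
    else
        old->max_world = old->min_world - 1;  /* fully covered: retire it */
}

int main(void)
{
    interval_t a = { 5, 20 }, b = { 3, 12 }, c = { 11, 14 };
    interval_t neu = { 10, 15 };
    clip_old_interval(&a, neu);  /* tail case  -> [16, 20] */
    clip_old_interval(&b, neu);  /* head case  -> [3, 9]   */
    clip_old_interval(&c, neu);  /* covered    -> empty    */
    printf("a=[%zu,%zu] b=[%zu,%zu] c=[%zu,%zu]\n",
           a.min_world, a.max_world, b.min_world, b.max_world, c.min_world, c.max_world);
    return 0;
}
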
-static int jl_is_uninferred(jl_method_instance_t *li)
-{
- if (!li->inferred)
- return 1;
- if (jl_is_code_info(li->inferred) && !((jl_code_info_t*)li->inferred)->inferred)
- return 1;
- return 0;
-}
static int get_spec_unspec_list(jl_typemap_entry_t *l, void *closure)
{
- if (jl_is_method_instance(l->func.value) && jl_is_uninferred(l->func.linfo))
+ if (jl_is_method_instance(l->func.value) && !jl_is_rettype_inferred(l->func.linfo))
jl_array_ptr_1d_push((jl_array_t*)closure, l->func.value);
return 1;
}
@@ -262,6 +436,7 @@ static int get_method_unspec_list(jl_typemap_entry_t *def, void *closure)
return 1;
}
+
static void jl_reset_mt_caches(jl_module_t *m, jl_array_t *unspec)
{
// removes all method caches
@@ -294,20 +469,24 @@ static void jl_reset_mt_caches(jl_module_t *m, jl_array_t *unspec)
}
}
-jl_function_t *jl_typeinf_func=NULL;
+jl_function_t *jl_typeinf_func = NULL;
+size_t jl_typeinf_world = 0;
JL_DLLEXPORT void jl_set_typeinf_func(jl_value_t *f)
{
jl_typeinf_func = (jl_function_t*)f;
+ jl_typeinf_world = jl_get_tls_world_age();
+ ++jl_world_counter; // make type-inference the only thing in this world
// give type inference a chance to see all of these
+ // TODO: also reinfer if max_world != ~(size_t)0
jl_array_t *unspec = jl_alloc_vec_any(0);
JL_GC_PUSH1(&unspec);
jl_reset_mt_caches(jl_main_module, unspec);
size_t i, l;
for (i = 0, l = jl_array_len(unspec); i < l; i++) {
jl_method_instance_t *li = (jl_method_instance_t*)jl_array_ptr_ref(unspec, i);
- if (jl_is_uninferred(li))
- jl_type_infer(li, 1);
+ if (!jl_is_rettype_inferred(li))
+ jl_type_infer(&li, jl_world_counter, 1);
}
JL_GC_POP();
}
@@ -379,7 +558,8 @@ static jl_tupletype_t *join_tsig(jl_tupletype_t *tt, jl_tupletype_t *sig)
}
static jl_value_t *ml_matches(union jl_typemap_t ml, int offs,
- jl_tupletype_t *type, int lim, int include_ambiguous);
+ jl_tupletype_t *type, int lim, int include_ambiguous,
+ size_t world, size_t *min_valid, size_t *max_valid);
static void jl_cacheable_sig(
jl_tupletype_t *const type, // the specialized type signature for type lambda
@@ -619,6 +799,7 @@ static jl_method_instance_t *cache_method(jl_methtable_t *mt, union jl_typemap_t
jl_tupletype_t *type, // the specialized type signature for type lambda
jl_tupletype_t *tt, // the original tupletype of the signature
jl_typemap_entry_t *m,
+ size_t world,
jl_svec_t *sparams,
int allow_exec)
{
@@ -706,6 +887,8 @@ static jl_method_instance_t *cache_method(jl_methtable_t *mt, union jl_typemap_t
need_guard_entries = 1;
}
+ size_t min_valid = definition->min_world;
+ size_t max_valid = definition->max_world;
int cache_with_orig = 0;
jl_svec_t* guardsigs = jl_emptysvec;
jl_tupletype_t *origtype = type; // backup the prior value of `type`
@@ -714,7 +897,7 @@ static jl_method_instance_t *cache_method(jl_methtable_t *mt, union jl_typemap_t
temp2 = (jl_value_t*)type;
}
if (need_guard_entries) {
- temp = ml_matches(mt->defs, 0, type, -1, 0); // TODO: use MAX_UNSPECIALIZED_CONFLICTS?
+ temp = ml_matches(mt->defs, 0, type, -1, 0, world, &min_valid, &max_valid); // TODO: use MAX_UNSPECIALIZED_CONFLICTS?
int guards = 0;
if (temp == jl_false) {
cache_with_orig = 1;
@@ -755,18 +938,23 @@ static jl_method_instance_t *cache_method(jl_methtable_t *mt, union jl_typemap_t
guards = 0;
for(i = 0, l = jl_array_len(temp); i < l; i++) {
jl_value_t *m = jl_array_ptr_ref(temp, i);
- if (((jl_method_t*)jl_svecref(m,2)) != definition) {
+ jl_method_t *other = (jl_method_t*)jl_svecref(m, 2);
+ if (other != definition) {
jl_svecset(guardsigs, guards, (jl_tupletype_t*)jl_svecref(m, 0));
guards++;
//jl_typemap_insert(cache, parent, (jl_tupletype_t*)jl_svecref(m, 0),
- // jl_emptysvec, NULL, jl_emptysvec, /*guard*/NULL, jl_cachearg_offset(mt), &lambda_cache, NULL);
+ // jl_emptysvec, NULL, jl_emptysvec, /*guard*/NULL, jl_cachearg_offset(mt), &lambda_cache, other->min_world, other->max_world, NULL);
}
}
}
}
// here we infer types and specialize the method
- newmeth = jl_specializations_get_linfo(definition, type, sparams);
+ newmeth = jl_specializations_get_linfo(definition, type, sparams, world);
+ if (newmeth->min_world > min_valid)
+ min_valid = newmeth->min_world;
+ if (newmeth->max_world < max_valid)
+ max_valid = newmeth->max_world;
if (cache_with_orig) {
// if there is a need to cache with one of the original signatures,
@@ -804,7 +992,8 @@ static jl_method_instance_t *cache_method(jl_methtable_t *mt, union jl_typemap_t
}
}
- jl_typemap_insert(cache, parent, origtype, jl_emptysvec, type, guardsigs, (jl_value_t*)newmeth, jl_cachearg_offset(mt), &lambda_cache, NULL);
+ jl_typemap_insert(cache, parent, origtype, jl_emptysvec, type, guardsigs, (jl_value_t*)newmeth, jl_cachearg_offset(mt), &lambda_cache,
+ min_valid, max_valid, NULL);
if (definition->traced && jl_method_tracer && allow_exec)
jl_call_tracer(jl_method_tracer, (jl_value_t*)newmeth);
@@ -812,7 +1001,7 @@ static jl_method_instance_t *cache_method(jl_methtable_t *mt, union jl_typemap_t
return newmeth;
}
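
cache_method now threads a validity interval through the whole operation: it starts from the defining method's range, lets ml_matches narrow it against the other applicable methods, intersects it with the new MethodInstance's range, and stores the result on the cache entry. The net effect is an interval intersection, as in this sketch (intersect_interval is an illustrative helper):

#include <stddef.h>
#include <stdio.h>

typedef struct { size_t min_world, max_world; } interval_t;

/* A cache entry is valid only where every input that shaped it is valid. */
static interval_t intersect_interval(interval_t a, interval_t b)
{
    interval_t r;
    r.min_world = a.min_world > b.min_world ? a.min_world : b.min_world;
    r.max_world = a.max_world < b.max_world ? a.max_world : b.max_world;
    return r;
}

int main(void)
{
    interval_t definition = { 4, ~(size_t)0 };  /* the target method */
    interval_t guards     = { 6, 30 };          /* narrowed by the match search */
    interval_t instance   = { 4, ~(size_t)0 };  /* the specialized instance */

    interval_t entry = intersect_interval(intersect_interval(definition, guards), instance);
    printf("cache entry valid for worlds [%zu, %zu]\n", entry.min_world, entry.max_world);
    return 0;
}
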
-static jl_method_instance_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype_t *tt, int cache, int inexact, int allow_exec)
+static jl_method_instance_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype_t *tt, int cache, int inexact, int allow_exec, size_t world)
{
// caller must hold the mt->writelock
jl_typemap_entry_t *entry = NULL;
@@ -821,7 +1010,7 @@ static jl_method_instance_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype
jl_tupletype_t *sig = NULL;
JL_GC_PUSH4(&env, &entry, &func, &sig);
- entry = jl_typemap_assoc_by_type(mt->defs, tt, &env, inexact, 1, 0);
+ entry = jl_typemap_assoc_by_type(mt->defs, tt, &env, inexact, 1, 0, world);
if (entry == NULL || entry == INEXACT_ENTRY) {
JL_GC_POP();
return NULL;
@@ -835,10 +1024,10 @@ static jl_method_instance_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype
sig = join_tsig(tt, entry->sig);
jl_method_instance_t *nf;
if (!cache) {
- nf = jl_specializations_get_linfo(m, sig, env);
+ nf = jl_specializations_get_linfo(m, sig, env, world);
}
else {
- nf = cache_method(mt, &mt->cache, (jl_value_t*)mt, sig, tt, entry, env, allow_exec);
+ nf = cache_method(mt, &mt->cache, (jl_value_t*)mt, sig, tt, entry, world, env, allow_exec);
}
JL_GC_POP();
return nf;
@@ -875,7 +1064,7 @@ struct ambiguous_matches_env {
struct typemap_intersection_env match;
union jl_typemap_t defs;
jl_typemap_entry_t *newentry;
- jl_array_t *shadowed;
+ jl_value_t *shadowed;
int after;
};
const int eager_ambiguity_printing = 0;
@@ -906,7 +1095,8 @@ static int check_ambiguous_visitor(jl_typemap_entry_t *oldentry, struct typemap_
// now we are checking that the reverse is true
if (!jl_args_morespecific((jl_value_t*)(closure->after ? type : sig),
(jl_value_t*)(closure->after ? sig : type))) {
- jl_typemap_entry_t *l = jl_typemap_assoc_by_type(map, (jl_tupletype_t*)isect, NULL, 0, 0, 0);
+ jl_typemap_entry_t *l = jl_typemap_assoc_by_type(map, (jl_tupletype_t*)isect, NULL, 0, 0, 0,
+ closure->newentry->min_world);
if (l != NULL) // ok, intersection is covered
return 1;
jl_method_t *mambig = oldentry->func.method;
@@ -937,14 +1127,22 @@ static int check_ambiguous_visitor(jl_typemap_entry_t *oldentry, struct typemap_
else if (closure->after) {
// record that this method definition is being partially replaced
if (closure->shadowed == NULL) {
- closure->shadowed = jl_alloc_vec_any(0);
+ closure->shadowed = oldentry->func.value;
+ }
+ else if (!jl_is_array(closure->shadowed)) {
+ jl_array_t *list = jl_alloc_vec_any(2);
+ jl_array_ptr_set(list, 0, closure->shadowed);
+ jl_array_ptr_set(list, 1, oldentry->func.value);
+ closure->shadowed = (jl_value_t*)list;
+ }
+ else {
+ jl_array_ptr_1d_push((jl_array_t*)closure->shadowed, oldentry->func.value);
}
- jl_array_ptr_1d_push(closure->shadowed, oldentry->func.value);
}
return 1;
}
-static jl_array_t *check_ambiguous_matches(union jl_typemap_t defs,
+static jl_value_t *check_ambiguous_matches(union jl_typemap_t defs,
jl_typemap_entry_t *newentry)
{
jl_tupletype_t *type = newentry->sig;
@@ -991,73 +1189,130 @@ static void method_overwrite(jl_typemap_entry_t *newentry, jl_method_t *oldvalue
jl_printf(s, ".\n");
}
-// invalidate cached methods that overlap this definition
-static void flush_from_cache(jl_typemap_entry_t *entry);
-static void invalidate_conflicting(union jl_typemap_t *pml, jl_value_t *type, jl_value_t *parent, jl_array_t *shadowed)
-{
- jl_typemap_entry_t **pl;
- if (jl_typeof(pml->unknown) == (jl_value_t*)jl_typemap_level_type) {
- jl_typemap_level_t *cache = pml->node;
- if (cache->arg1.values != (void*)jl_nothing) {
- size_t i, l = jl_array_len(cache->arg1.values);
- union jl_typemap_t *d = (union jl_typemap_t*)jl_array_data(cache->arg1.values);
+static void update_max_args(jl_methtable_t *mt, jl_tupletype_t *type)
+{
+ size_t na = jl_nparams(type);
+ if (jl_va_tuple_kind(type) == JL_VARARG_UNBOUND)
+ na--;
+ if (na > mt->max_args)
+ mt->max_args = na;
+}
+
+
+// invalidate cached methods that had an edge to a replaced method
+static void invalidate_method_instance(jl_method_instance_t *replaced, size_t max_world)
+{
+ JL_LOCK_NOGC(&replaced->def->writelock);
+ jl_array_t *backedges = replaced->backedges;
+ if (replaced->max_world > max_world) {
+ // recurse to all backedges to update their valid range also
+ assert(replaced->min_world <= max_world && "attempting to set invalid world constraints");
+ replaced->max_world = max_world;
+ update_world_bound(replaced, set_max_world2, max_world);
+ if (backedges) {
+ size_t i, l = jl_array_len(backedges);
for (i = 0; i < l; i++) {
- union jl_typemap_t *pl = &d[i];
- if (pl->unknown != jl_nothing) {
- invalidate_conflicting(pl, type, (jl_value_t*)cache->arg1.values, shadowed);
- }
+ jl_method_instance_t *replaced = (jl_method_instance_t*)jl_array_ptr_ref(backedges, i);
+ invalidate_method_instance(replaced, max_world);
}
}
- if (cache->targ.values != (void*)jl_nothing) {
- size_t i, l = jl_array_len(cache->targ.values);
- union jl_typemap_t *d = (union jl_typemap_t*)jl_array_data(cache->targ.values);
+ }
+ replaced->backedges = NULL;
+ JL_UNLOCK_NOGC(&replaced->def->writelock);
+}
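
invalidate_method_instance caps a replaced instance's max_world and then recurses through its recorded backedges, so every compiled caller that depended on the old target is retired in the same step. A small standalone model of that transitive walk (node_t and its fixed-size edge array are illustrative, not the runtime's layout):

#include <stddef.h>
#include <stdio.h>

#define MAX_EDGES 4

/* Illustrative node: a compiled specialization plus who depends on it. */
typedef struct node {
    const char *name;
    size_t max_world;                 /* ~(size_t)0 while still valid */
    struct node *backedges[MAX_EDGES];
    size_t nbackedges;
} node_t;

/* Cap validity at `max_world` and propagate to everything compiled against it. */
static void invalidate(node_t *n, size_t max_world)
{
    if (n->max_world <= max_world)
        return;                        /* already invalid at or before this world */
    n->max_world = max_world;
    printf("invalidated %s at world %zu\n", n->name, max_world);
    for (size_t i = 0; i < n->nbackedges; i++)
        invalidate(n->backedges[i], max_world);
    n->nbackedges = 0;                 /* edges are consumed, as in the runtime */
}

int main(void)
{
    node_t h = { "h()", ~(size_t)0, {0},  0 };
    node_t g = { "g()", ~(size_t)0, {&h}, 1 };   /* h was compiled against g */
    node_t f = { "f()", ~(size_t)0, {&g}, 1 };   /* g was compiled against f */
    invalidate(&f, 9);                           /* redefining f in world 10 */
    return 0;
}
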
+
+// invalidate cached methods that overlap this definition
+struct invalidate_conflicting_env {
+ struct typemap_intersection_env match;
+ size_t max_world;
+};
+static int invalidate_backedges(jl_typemap_entry_t *oldentry, struct typemap_intersection_env *closure0)
+{
+ struct invalidate_conflicting_env *closure = container_of(closure0, struct invalidate_conflicting_env, match);
+ if (oldentry->max_world > closure->max_world) {
+ struct set_world def;
+ def.replaced = oldentry->func.linfo;
+ def.world = closure->max_world;
+ jl_method_t *m = def.replaced->def;
+
+ // truncate the max-valid in the invoke cache
+ if (m->invokes.unknown != NULL)
+ jl_typemap_visitor(m->invokes, set_max_world2, (void*)&def);
+ // invalidate mt cache entries
+ jl_datatype_t *gf = jl_first_argument_datatype((jl_value_t*)m->sig);
+ assert(jl_is_datatype(gf) && gf->name->mt && "method signature invalid?");
+ jl_typemap_visitor(gf->name->mt->cache, set_max_world2, (void*)&def);
+
+ // invalidate backedges
+ JL_LOCK_NOGC(&def.replaced->def->writelock);
+ jl_array_t *backedges = def.replaced->backedges;
+ if (backedges) {
+ size_t i, l = jl_array_len(backedges);
+ jl_method_instance_t **replaced = (jl_method_instance_t**)jl_array_data(backedges);
for (i = 0; i < l; i++) {
- union jl_typemap_t *pl = &d[i];
- if (pl->unknown != jl_nothing) {
- invalidate_conflicting(pl, type, (jl_value_t*)cache->targ.values, shadowed);
- }
+ invalidate_method_instance(replaced[i], closure->max_world);
}
}
- pl = &cache->linear;
- parent = (jl_value_t*)cache;
+ def.replaced->backedges = NULL;
+ JL_UNLOCK_NOGC(&def.replaced->def->writelock);
+ }
+ return 1;
+}
+
+// add a backedge from callee to caller
+JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_method_instance_t *caller)
+{
+ assert(callee->min_world <= caller->min_world && callee->max_world >= caller->max_world);
+ JL_LOCK(&callee->def->writelock);
+ if (!callee->backedges) {
+ // lazy-init the backedges array
+ callee->backedges = jl_alloc_vec_any(1);
+ jl_gc_wb(callee, callee->backedges);
+ jl_array_ptr_set(callee->backedges, 0, caller);
}
else {
- pl = &pml->leaf;
- }
- jl_typemap_entry_t *l = *pl;
- size_t i, n = jl_array_len(shadowed);
- jl_value_t **d = jl_array_ptr_data(shadowed);
- while (l != (void*)jl_nothing) {
- int replaced = 0;
- for (i = 0; i < n; i++) {
- if (d[i] == (jl_value_t*)l->func.linfo->def) {
- replaced = jl_type_intersection(type, (jl_value_t*)l->sig) != (jl_value_t*)jl_bottom_type;
+ size_t i, l = jl_array_len(callee->backedges);
+ for (i = 0; i < l; i++) {
+ if (jl_array_ptr_ref(callee->backedges, i) == (jl_value_t*)caller)
break;
- }
- }
- if (replaced) {
- flush_from_cache(l);
- *pl = l->next;
- jl_gc_wb(parent, *pl);
}
- else {
- pl = &l->next;
- parent = (jl_value_t*)l;
+ if (i == l) {
+ jl_array_ptr_1d_push(callee->backedges, (jl_value_t*)caller);
}
- l = l->next;
}
+ JL_UNLOCK(&callee->def->writelock);
}
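
Backedge recording itself stays simple: lazily allocate the list on first use and do a linear scan so the same caller is never recorded twice. A sketch of that append-if-absent shape, with a grow-by-one realloc standing in for the runtime's array push:

#include <stdlib.h>
#include <stdio.h>

/* Illustrative dynamic list of caller pointers. */
typedef struct {
    void **items;
    size_t len;
} edge_list_t;

/* Append `caller` unless it is already recorded (mirrors the linear dedupe scan). */
static void add_backedge(edge_list_t *edges, void *caller)
{
    for (size_t i = 0; i < edges->len; i++)
        if (edges->items[i] == caller)
            return;
    void **grown = realloc(edges->items, (edges->len + 1) * sizeof(void *));
    if (!grown)
        return;  /* allocation failure: silently drop the edge in this sketch */
    grown[edges->len++] = caller;
    edges->items = grown;
}

int main(void)
{
    edge_list_t edges = { NULL, 0 };
    int a, b;
    add_backedge(&edges, &a);
    add_backedge(&edges, &b);
    add_backedge(&edges, &a);   /* duplicate, ignored */
    printf("%zu backedges recorded\n", edges.len);
    free(edges.items);
    return 0;
}
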
-static void update_max_args(jl_methtable_t *mt, jl_tupletype_t *type)
+// add a backedge from a non-existent signature to caller
+JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_value_t *caller)
{
- size_t na = jl_nparams(type);
- if (jl_va_tuple_kind(type) == JL_VARARG_UNBOUND)
- na--;
- if (na > mt->max_args)
- mt->max_args = na;
+ JL_LOCK(&mt->writelock);
+ if (!mt->backedges) {
+ // lazy-init the backedges array
+ mt->backedges = jl_alloc_vec_any(2);
+ jl_gc_wb(mt, mt->backedges);
+ jl_array_ptr_set(mt->backedges, 0, typ);
+ jl_array_ptr_set(mt->backedges, 1, caller);
+ }
+ else {
+ size_t i, l = jl_array_len(mt->backedges);
+ for (i = 1; i < l; i += 2) {
+ if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) {
+ if (jl_array_ptr_ref(mt->backedges, i) == caller) {
+ JL_UNLOCK(&mt->writelock);
+ return;
+ }
+ // reuse the already cached instance of this type
+ typ = jl_array_ptr_ref(mt->backedges, i - 1);
+ }
+ }
+ jl_array_ptr_1d_push(mt->backedges, typ);
+ jl_array_ptr_1d_push(mt->backedges, caller);
+ }
+ JL_UNLOCK(&mt->writelock);
}
-void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method, jl_tupletype_t *simpletype)
+JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method, jl_tupletype_t *simpletype)
{
assert(jl_is_method(method));
assert(jl_is_mtable(mt));
@@ -1065,42 +1320,89 @@ void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method, jl_tupletyp
jl_svec_t *tvars = method->tvars;
assert(jl_is_tuple_type(type));
jl_value_t *oldvalue = NULL;
+ struct invalidate_conflicting_env env;
+ env.max_world = method->min_world - 1;
JL_GC_PUSH1(&oldvalue);
JL_LOCK(&mt->writelock);
jl_typemap_entry_t *newentry = jl_typemap_insert(&mt->defs, (jl_value_t*)mt,
- type, tvars, simpletype, jl_emptysvec, (jl_value_t*)method, 0, &method_defs, &oldvalue);
+ type, tvars, simpletype, jl_emptysvec, (jl_value_t*)method, 0, &method_defs,
+ method->min_world, method->max_world, &oldvalue);
if (oldvalue) {
method->ambig = ((jl_method_t*)oldvalue)->ambig;
method_overwrite(newentry, (jl_method_t*)oldvalue);
- jl_array_t *shadowed = jl_alloc_vec_any(1);
- jl_array_ptr_set(shadowed, 0, oldvalue);
- oldvalue = (jl_value_t*)shadowed;
}
else {
- oldvalue = (jl_value_t*)check_ambiguous_matches(mt->defs, newentry);
+ oldvalue = check_ambiguous_matches(mt->defs, newentry);
+ if (mt->backedges) {
+ jl_value_t **backedges = jl_array_data(mt->backedges);
+ size_t i, na = jl_array_len(mt->backedges);
+ size_t ins = 0;
+ for (i = 1; i < na; i += 2) {
+ jl_value_t *backedgetyp = backedges[i - 1];
+ if (jl_type_intersection(backedgetyp, (jl_value_t*)type) != (jl_value_t*)jl_bottom_type) {
+ jl_method_instance_t *backedge = (jl_method_instance_t*)backedges[i];
+ invalidate_method_instance(backedge, env.max_world);
+ }
+ else {
+ backedges[ins++] = backedges[i - 1];
+ backedges[ins++] = backedges[i - 0];
+ }
+ }
+ if (ins == 0)
+ mt->backedges = NULL;
+ else
+ jl_array_del_end(mt->backedges, na - ins);
+ }
+ }
+ if (oldvalue) {
+ size_t l = jl_svec_len(type->parameters);
+ jl_value_t *va = NULL;
+ if (l > 0) {
+ va = jl_tparam(type, l - 1);
+ if (jl_is_vararg_type(va))
+ va = jl_tparam0(va);
+ else
+ va = NULL;
+ }
+ env.match.va = va;
+ env.match.type = (jl_value_t*)type;
+ env.match.fptr = invalidate_backedges;
+
+ if (jl_is_method(oldvalue)) {
+ jl_typemap_intersection_visitor(((jl_method_t*)oldvalue)->specializations, 0, &env.match);
+ }
+ else {
+ assert(jl_is_array(oldvalue));
+ jl_method_t **d = (jl_method_t**)jl_array_ptr_data(oldvalue);
+ size_t i, n = jl_array_len(oldvalue);
+ for (i = 0; i < n; i++) {
+ jl_typemap_intersection_visitor(d[i]->specializations, 0, &env.match);
+ }
+ }
}
- if (oldvalue)
- invalidate_conflicting(&mt->cache, (jl_value_t*)type, (jl_value_t*)mt, (jl_array_t*)oldvalue);
- JL_GC_POP();
update_max_args(mt, type);
JL_UNLOCK(&mt->writelock);
+ JL_GC_POP();
}
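
jl_method_table_insert now performs invalidation at definition time: anything intersecting the new signature gets its max_world capped at the new method's min_world minus one, and the mt->backedges array of flattened (signature, caller) pairs is compacted in place as matching entries are consumed. A sketch of that compaction loop, with string equality standing in for type intersection and all names hypothetical:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Flattened (signature, caller) pairs, as in the method-table backedge list. */
typedef struct {
    const char *sig;
    const char *caller;
} pair_t;

/* Keep only pairs whose signature does not match the new definition;
 * matching callers would be invalidated (here we just report them). */
static size_t filter_backedges(pair_t *pairs, size_t n, const char *new_sig)
{
    size_t ins = 0;
    for (size_t i = 0; i < n; i++) {
        if (strcmp(pairs[i].sig, new_sig) == 0)
            printf("invalidate caller %s (was waiting on %s)\n", pairs[i].caller, pairs[i].sig);
        else
            pairs[ins++] = pairs[i];   /* compact surviving pairs to the front */
    }
    return ins;   /* new length; the runtime shrinks the array to this */
}

int main(void)
{
    pair_t edges[] = {
        { "f(::Int)",    "g(::Int)" },
        { "h(::String)", "g(::Int)" },
        { "f(::Int)",    "k(::Any)" },
    };
    size_t n = filter_backedges(edges, 3, "f(::Int)");
    printf("%zu backedges kept\n", n);
    return 0;
}
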
-void JL_NORETURN jl_method_error_bare(jl_function_t *f, jl_value_t *args)
+void JL_NORETURN jl_method_error_bare(jl_function_t *f, jl_value_t *args, size_t world)
{
- jl_ptls_t ptls = jl_get_ptls_states();
- jl_value_t *fargs[3] = {
- (jl_value_t*)jl_methoderror_type,
- (jl_value_t*)f,
- args
- };
- if (fargs[0]) {
- jl_throw(jl_apply_generic(fargs, 3));
+ if (jl_methoderror_type) {
+ jl_value_t *e = jl_new_struct_uninit(jl_methoderror_type);
+ struct jl_method_error {
+ jl_value_t *f;
+ jl_value_t *args;
+ size_t world;
+ } *pe = (void*)e,
+ ee = {f, args, world};
+ *pe = ee;
+ jl_throw(e);
}
else {
jl_printf((JL_STREAM*)STDERR_FILENO, "A method error occurred before the base MethodError type was defined. Aborting...\n");
- jl_static_show((JL_STREAM*)STDERR_FILENO,(jl_value_t*)f); jl_printf((JL_STREAM*)STDERR_FILENO,"\n");
+ jl_static_show((JL_STREAM*)STDERR_FILENO,(jl_value_t*)f); jl_printf((JL_STREAM*)STDERR_FILENO," world %u\n", (unsigned)world);
jl_static_show((JL_STREAM*)STDERR_FILENO,args); jl_printf((JL_STREAM*)STDERR_FILENO,"\n");
+ jl_ptls_t ptls = jl_get_ptls_states();
ptls->bt_size = rec_backtrace(ptls->bt_data, JL_MAX_BT_SIZE);
jl_critical_error(0, NULL, ptls->bt_data, &ptls->bt_size);
abort();
@@ -1108,11 +1410,11 @@ void JL_NORETURN jl_method_error_bare(jl_function_t *f, jl_value_t *args)
// not reached
}
-void JL_NORETURN jl_method_error(jl_function_t *f, jl_value_t **args, size_t na)
+void JL_NORETURN jl_method_error(jl_function_t *f, jl_value_t **args, size_t na, size_t world)
{
jl_value_t *argtup = jl_f_tuple(NULL, args+1, na-1);
JL_GC_PUSH1(&argtup);
- jl_method_error_bare(f, argtup);
+ jl_method_error_bare(f, argtup, world);
// not reached
}
@@ -1152,20 +1454,27 @@ jl_tupletype_t *arg_type_tuple(jl_value_t **args, size_t nargs)
}
jl_method_instance_t *jl_method_lookup_by_type(jl_methtable_t *mt, jl_tupletype_t *types,
- int cache, int inexact, int allow_exec)
+ int cache, int inexact, int allow_exec, size_t world)
{
- jl_typemap_entry_t *entry = jl_typemap_assoc_by_type(mt->cache, types, NULL, 0, 1, jl_cachearg_offset(mt));
- if (entry)
- return entry->func.linfo;
+ jl_typemap_entry_t *entry = jl_typemap_assoc_by_type(mt->cache, types, NULL, 0, 1, jl_cachearg_offset(mt), world);
+ if (entry) {
+ jl_method_instance_t *linfo = (jl_method_instance_t*)entry->func.value;
+ assert(linfo->min_world <= entry->min_world && linfo->max_world >= entry->max_world &&
+ "typemap consistency error: MethodInstance doesn't apply to full range of its entry");
+ return linfo;
+ }
JL_LOCK(&mt->writelock);
- entry = jl_typemap_assoc_by_type(mt->cache, types, NULL, 0, 1, jl_cachearg_offset(mt));
+ entry = jl_typemap_assoc_by_type(mt->cache, types, NULL, 0, 1, jl_cachearg_offset(mt), world);
if (entry) {
+ jl_method_instance_t *linfo = (jl_method_instance_t*)entry->func.value;
+ assert(linfo->min_world <= entry->min_world && linfo->max_world >= entry->max_world &&
+ "typemap consistency error: MethodInstance doesn't apply to full range of its entry");
JL_UNLOCK(&mt->writelock);
- return entry->func.linfo;
+ return linfo;
}
if (jl_is_leaf_type((jl_value_t*)types))
cache = 1;
- jl_method_instance_t *sf = jl_mt_assoc_by_type(mt, types, cache, inexact, allow_exec);
+ jl_method_instance_t *sf = jl_mt_assoc_by_type(mt, types, cache, inexact, allow_exec, world);
if (cache) {
JL_UNLOCK(&mt->writelock);
}
@@ -1177,19 +1486,18 @@ jl_method_instance_t *jl_method_lookup_by_type(jl_methtable_t *mt, jl_tupletype_
return sf;
}
-JL_DLLEXPORT int jl_method_exists(jl_methtable_t *mt, jl_tupletype_t *types)
+JL_DLLEXPORT int jl_method_exists(jl_methtable_t *mt, jl_tupletype_t *types, size_t world)
{
- return jl_method_lookup_by_type(mt, types, 0, 0, 1) != NULL;
+ return jl_method_lookup_by_type(mt, types, 0, 0, 1, world) != NULL;
}
-jl_method_instance_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t nargs, int cache)
+jl_method_instance_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t nargs, int cache, size_t world)
{
- jl_typemap_entry_t *entry = jl_typemap_assoc_exact(mt->cache, args, nargs, jl_cachearg_offset(mt));
+ jl_typemap_entry_t *entry = jl_typemap_assoc_exact(mt->cache, args, nargs, jl_cachearg_offset(mt), world);
if (entry)
return entry->func.linfo;
-
JL_LOCK(&mt->writelock);
- entry = jl_typemap_assoc_exact(mt->cache, args, nargs, jl_cachearg_offset(mt));
+ entry = jl_typemap_assoc_exact(mt->cache, args, nargs, jl_cachearg_offset(mt), world);
if (entry) {
JL_UNLOCK(&mt->writelock);
return entry->func.linfo;
@@ -1197,7 +1505,7 @@ jl_method_instance_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, si
jl_tupletype_t *tt = arg_type_tuple(args, nargs);
jl_method_instance_t *sf = NULL;
JL_GC_PUSH2(&tt, &sf);
- sf = jl_mt_assoc_by_type(mt, tt, cache, 0, 1);
+ sf = jl_mt_assoc_by_type(mt, tt, cache, 0, 1, world);
if (cache) {
JL_UNLOCK(&mt->writelock);
}
@@ -1210,10 +1518,36 @@ jl_method_instance_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, si
return sf;
}
-JL_DLLEXPORT jl_value_t *jl_matching_methods(jl_tupletype_t *types, int lim, int include_ambiguous);
+// return a Vector{Any} of svecs, each describing a method match:
+// Any[svec(tt, spvals, m), ...]
+// tt is the intersection of the type argument and the method signature,
+// spvals is any matched static parameter values, m is the Method,
+//
+// lim is the max # of methods to return. if there are more, returns jl_false.
+// -1 for no limit.
+JL_DLLEXPORT jl_value_t *jl_matching_methods(jl_tupletype_t *types, int lim, int include_ambiguous, size_t world, size_t *min_valid, size_t *max_valid)
+{
+ assert(jl_nparams(types) > 0);
+ jl_value_t *matches = NULL;
+ if (jl_tparam0(types) == jl_bottom_type) {
+ matches = (jl_value_t*)jl_alloc_vec_any(0);
+ }
+ else if (!jl_is_datatype(jl_tparam0(types))) {
+ return jl_false; // indeterminate - ml_matches can't deal with this case
+ }
+ else {
+ jl_methtable_t *mt = ((jl_datatype_t*)jl_tparam0(types))->name->mt;
+ if (mt == NULL)
+ matches = (jl_value_t*)jl_alloc_vec_any(0);
+ else
+ matches = ml_matches(mt->defs, 0, types, lim, include_ambiguous, world, min_valid, max_valid);
+ }
+ return matches;
+}
-jl_llvm_functions_t jl_compile_for_dispatch(jl_method_instance_t *li)
+jl_llvm_functions_t jl_compile_for_dispatch(jl_method_instance_t **pli, size_t world)
{
+ jl_method_instance_t *li = *pli;
if (li->jlcall_api == 2)
return li->functionObjectsDecls;
if (jl_options.compile_enabled == JL_OPTIONS_COMPILE_OFF ||
@@ -1251,21 +1585,22 @@ jl_llvm_functions_t jl_compile_for_dispatch(jl_method_instance_t *li)
return decls;
jl_code_info_t *src = NULL;
- if (li->def && jl_is_uninferred(li) && !li->inInference &&
+ if (li->def && !jl_is_rettype_inferred(li) && !li->inInference &&
jl_symbol_name(li->def->name)[0] != '@') {
// don't bother with typeinf on macros or toplevel thunks
// but try to infer everything else
- src = jl_type_infer(li, 0);
+ src = jl_type_infer(pli, world, 0);
+ li = *pli;
}
- // check again, because jl_type_infer may have compiled it
+ // check again, because jl_type_infer may have changed li or compiled it
decls = li->functionObjectsDecls;
if (decls.functionObject != NULL || li->jlcall_api == 2)
return decls;
- return jl_compile_linfo(li, src, &jl_default_cgparams);
+ return jl_compile_linfo(&li, src, world, &jl_default_cgparams);
}
// compile-time method lookup
-jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types)
+jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types, size_t world)
{
JL_TIMING(METHOD_LOOKUP_COMPILE);
assert(jl_nparams(types) > 0);
@@ -1280,8 +1615,10 @@ jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types)
// if one argument type is DataType, multiple Type{} definitions
// might match. also be conservative with tuples rather than trying
// to analyze them in detail.
+ size_t min_valid = 0;
+ size_t max_valid = ~(size_t)0;
if (ti == (jl_value_t*)jl_datatype_type || jl_is_tuple_type(ti)) {
- jl_value_t *matches = jl_matching_methods(types, 1, 0);
+ jl_value_t *matches = jl_matching_methods(types, 1, 0, world, &min_valid, &max_valid);
if (matches == jl_false)
return NULL;
break;
@@ -1292,7 +1629,8 @@ jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types)
// most of the time sf is rooted in mt, but if the method is staged it may
// not be the case
// TODO: the above should be false, but better safe than sorry?
- jl_method_instance_t *sf = jl_method_lookup_by_type(mt, types, 1, 1, 1);
+ jl_method_instance_t *sf = jl_method_lookup_by_type(mt, types, 1, 1, 1, world);
+ assert(sf == NULL || (sf->min_world <= world && sf->max_world >= world));
JL_GC_PUSH1(&sf);
if (sf != NULL && jl_has_call_ambiguities(types, sf->def)) {
sf = NULL;
@@ -1303,20 +1641,21 @@ jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types)
JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types)
{
- jl_method_instance_t *li = jl_get_specialization1(types);
+ size_t world = jl_world_counter;
+ jl_method_instance_t *li = jl_get_specialization1(types, world);
if (li == NULL)
return 0;
jl_code_info_t *src = NULL;
- if (jl_is_uninferred(li))
- src = jl_type_infer(li, 0);
+ if (!jl_is_rettype_inferred(li))
+ src = jl_type_infer(&li, world, 0);
if (li->jlcall_api != 2)
- jl_compile_linfo(li, src, &jl_default_cgparams);
+ jl_compile_linfo(&li, src, world, &jl_default_cgparams);
return 1;
}
-JL_DLLEXPORT jl_value_t *jl_get_spec_lambda(jl_tupletype_t *types)
+JL_DLLEXPORT jl_value_t *jl_get_spec_lambda(jl_tupletype_t *types, size_t world)
{
- jl_method_instance_t *li = jl_get_specialization1(types);
+ jl_method_instance_t *li = jl_get_specialization1(types, world);
return li ? (jl_value_t*)li : jl_nothing;
}
@@ -1527,18 +1866,17 @@ static void _compile_all_deq(jl_array_t *found)
jl_method_t *m = ml->func.method;
jl_method_instance_t *linfo = m->unspecialized;
if (!linfo) {
- // XXX: use computed env rather than empty svec
- linfo = jl_specializations_get_linfo(m, ml->sig, jl_emptysvec);
+ linfo = jl_get_specialized(m, ml->sig, jl_emptysvec);
m->unspecialized = linfo;
jl_gc_wb(m, linfo);
}
- // infer this function now, if necessary
- if (linfo->jlcall_api == 2)
- continue;
- src = jl_type_infer(linfo, 1);
- if (linfo->jlcall_api == 2)
- continue;
+ //// infer this function now, if necessary
+ //if (linfo->jlcall_api == 2)
+ // continue;
+ //src = jl_type_infer(&linfo, jl_world_counter, 1);
+ //if (linfo->jlcall_api == 2)
+ // continue;
// keep track of whether all possible signatures have been cached (and thus whether it can skip trying to compile the template function)
// this is necessary because many intrinsics try to call static_eval and thus are not compilable unspecialized
@@ -1550,7 +1888,7 @@ static void _compile_all_deq(jl_array_t *found)
linfo->fptr = (jl_fptr_t)(uintptr_t)-1;
}
else {
- jl_compile_linfo(linfo, src, &jl_default_cgparams);
+ jl_compile_linfo(&linfo, src, jl_world_counter, &jl_default_cgparams);
assert(linfo->functionObjectsDecls.functionObject != NULL);
}
}
@@ -1757,15 +2095,6 @@ void call_cache_stats() {
}
#endif
-static void flush_from_cache(jl_typemap_entry_t *entry)
-{
- int i;
- for (i = 0; i < N_CALL_CACHE; i++) {
- if (call_cache[i] == entry)
- call_cache[i] = NULL;
- }
-}
-
#ifdef _COMPILER_MICROSOFT_
#define __builtin_return_address(n) _ReturnAddress()
#endif
@@ -1780,6 +2109,7 @@ JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t **args, uint32_t nargs)
if (traceen)
show_call(args[0], &args[1], nargs-1);
#endif
+ size_t world = jl_get_ptls_states()->world_age;
/*
search order:
@@ -1808,7 +2138,8 @@ JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t **args, uint32_t nargs)
for (i = 0; i < 4; i++) {
entry = call_cache[cache_idx[i]];
if (entry && nargs == jl_svec_len(entry->sig->parameters) &&
- sig_match_fast(args, jl_svec_data(entry->sig->parameters), 0, nargs)) {
+ sig_match_fast(args, jl_svec_data(entry->sig->parameters), 0, nargs) &&
+ world >= entry->min_world && world <= entry->max_world) {
break;
}
}
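
In jl_apply_generic the per-callsite fast cache now also checks that the entry's interval contains the calling task's age, so a stale entry simply misses and control falls through to the slower, world-aware lookup. A compact sketch of the hit test (cache_entry_t and the signature hash are illustrative):

#include <stddef.h>
#include <stdio.h>

/* Illustrative cache entry: signature hash plus a validity interval. */
typedef struct {
    unsigned sig_hash;
    size_t min_world, max_world;
} cache_entry_t;

/* A hit now needs both a signature match and a world match. */
static int cache_hit(const cache_entry_t *e, unsigned sig_hash, size_t world)
{
    return e->sig_hash == sig_hash &&
           world >= e->min_world && world <= e->max_world;
}

int main(void)
{
    cache_entry_t e = { 0xbeef, 3, 9 };
    printf("world 5: %d\n", cache_hit(&e, 0xbeef, 5));   /* hit  */
    printf("world 12: %d\n", cache_hit(&e, 0xbeef, 12)); /* miss: entry too old */
    return 0;
}
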
@@ -1817,7 +2148,7 @@ JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t **args, uint32_t nargs)
JL_TIMING(METHOD_LOOKUP_FAST);
jl_value_t *F = args[0];
mt = jl_gf_mtable(F);
- entry = jl_typemap_assoc_exact(mt->cache, args, nargs, jl_cachearg_offset(mt));
+ entry = jl_typemap_assoc_exact(mt->cache, args, nargs, jl_cachearg_offset(mt), world);
if (entry && entry->isleafsig && entry->simplesig == (void*)jl_nothing && entry->guardsigs == jl_emptysvec) {
// put the entry into the cache if it's valid for a leaftype lookup,
// using pick_which to slightly randomize where it ends up
@@ -1831,7 +2162,7 @@ JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t **args, uint32_t nargs)
}
else {
JL_LOCK(&mt->writelock);
- entry = jl_typemap_assoc_exact(mt->cache, args, nargs, jl_cachearg_offset(mt));
+ entry = jl_typemap_assoc_exact(mt->cache, args, nargs, jl_cachearg_offset(mt), world);
if (entry) {
mfunc = entry->func.linfo;
}
@@ -1840,7 +2171,7 @@ JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t **args, uint32_t nargs)
JL_TIMING(METHOD_LOOKUP_SLOW);
jl_tupletype_t *tt = arg_type_tuple(args, nargs);
JL_GC_PUSH1(&tt);
- mfunc = jl_mt_assoc_by_type(mt, tt, 1, 0, 1);
+ mfunc = jl_mt_assoc_by_type(mt, tt, 1, 0, 1, world);
JL_GC_POP();
}
JL_UNLOCK(&mt->writelock);
@@ -1849,7 +2180,7 @@ JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t **args, uint32_t nargs)
if (error_en)
show_call(F, args, nargs);
#endif
- jl_method_error((jl_function_t*)args[0], args, nargs);
+ jl_method_error((jl_function_t*)args[0], args, nargs, world);
// unreachable
}
}
@@ -1862,11 +2193,11 @@ JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t **args, uint32_t nargs)
return verify_type(res);
}
-JL_DLLEXPORT jl_value_t *jl_gf_invoke_lookup(jl_datatype_t *types)
+JL_DLLEXPORT jl_value_t *jl_gf_invoke_lookup(jl_datatype_t *types, size_t world)
{
jl_methtable_t *mt = ((jl_datatype_t*)jl_tparam0(types))->name->mt;
jl_typemap_entry_t *entry = jl_typemap_assoc_by_type(mt->defs, types, /*don't record env*/NULL,
- /*exact match*/0, /*subtype*/1, /*offs*/0);
+ /*exact match*/0, /*subtype*/1, /*offs*/0, world);
if (!entry)
return jl_nothing;
return (jl_value_t*)entry;
@@ -1883,6 +2214,7 @@ JL_DLLEXPORT jl_value_t *jl_gf_invoke_lookup(jl_datatype_t *types)
// NOTE: assumes argument type is a subtype of the lookup type.
jl_value_t *jl_gf_invoke(jl_tupletype_t *types0, jl_value_t **args, size_t nargs)
{
+ size_t world = jl_get_ptls_states()->world_age;
jl_svec_t *tpenv = jl_emptysvec;
jl_tupletype_t *newsig = NULL;
jl_tupletype_t *tt = NULL;
@@ -1892,10 +2224,10 @@ jl_value_t *jl_gf_invoke(jl_tupletype_t *types0, jl_value_t **args, size_t nargs
jl_value_t *gf = args[0];
types = (jl_datatype_t*)jl_argtype_with_function(gf, (jl_tupletype_t*)types0);
jl_methtable_t *mt = jl_gf_mtable(gf);
- jl_typemap_entry_t *entry = (jl_typemap_entry_t*)jl_gf_invoke_lookup(types);
+ jl_typemap_entry_t *entry = (jl_typemap_entry_t*)jl_gf_invoke_lookup(types, world);
if ((jl_value_t*)entry == jl_nothing) {
- jl_method_error_bare(gf, (jl_value_t*)types0);
+ jl_method_error_bare(gf, (jl_value_t*)types0, world);
// unreachable
}
@@ -1906,14 +2238,14 @@ jl_value_t *jl_gf_invoke(jl_tupletype_t *types0, jl_value_t **args, size_t nargs
jl_method_instance_t *mfunc = NULL;
jl_typemap_entry_t *tm = NULL;
if (method->invokes.unknown != NULL)
- tm = jl_typemap_assoc_exact(method->invokes, args, nargs, jl_cachearg_offset(mt));
+ tm = jl_typemap_assoc_exact(method->invokes, args, nargs, jl_cachearg_offset(mt), world);
if (tm) {
mfunc = tm->func.linfo;
}
else {
JL_LOCK(&method->writelock);
if (method->invokes.unknown != NULL)
- tm = jl_typemap_assoc_exact(method->invokes, args, nargs, jl_cachearg_offset(mt));
+ tm = jl_typemap_assoc_exact(method->invokes, args, nargs, jl_cachearg_offset(mt), world);
if (tm) {
mfunc = tm->func.linfo;
}
@@ -1931,7 +2263,7 @@ jl_value_t *jl_gf_invoke(jl_tupletype_t *types0, jl_value_t **args, size_t nargs
if (func->invokes.unknown == NULL)
func->invokes.unknown = jl_nothing;
- mfunc = cache_method(mt, &func->invokes, entry->func.value, sig, tt, entry, tpenv, 1);
+ mfunc = cache_method(mt, &func->invokes, entry->func.value, sig, tt, entry, world, tpenv, 1);
}
JL_UNLOCK(&method->writelock);
}
@@ -2025,8 +2357,14 @@ static int tvar_exists_at_top_level(jl_value_t *tv, jl_tupletype_t *sig, int att
struct ml_matches_env {
struct typemap_intersection_env match;
- jl_value_t *t; // results: array of svec(argtypes, params, Method)
+ // results:
+ jl_value_t *t; // array of svec(argtypes, params, Method)
+ size_t min_valid;
+ size_t max_valid;
+ // temporary:
jl_svec_t *matc; // current working svec
+ // inputs:
+ size_t world;
int lim;
int include_ambiguous; // whether ambiguous matches should be included
};
@@ -2034,6 +2372,27 @@ static int ml_matches_visitor(jl_typemap_entry_t *ml, struct typemap_intersectio
{
struct ml_matches_env *closure = container_of(closure0, struct ml_matches_env, match);
int i;
+ if (closure->world != 0) { // use zero as a flag value for returning all matches
+ // ignore method table entries that have been replaced in the current world
+ if (closure->world < ml->min_world) {
+ if (closure->max_valid >= ml->min_world)
+ closure->max_valid = ml->min_world - 1;
+ return 1;
+ }
+ else if (closure->world > ml->max_world) {
+ // ignore method table entries that are part of a later world
+ if (closure->min_valid <= ml->max_world)
+ closure->min_valid = ml->max_world + 1;
+ return 1;
+ }
+ else {
+ // intersect the env valid range with method's valid range
+ if (closure->min_valid < ml->min_world)
+ closure->min_valid = ml->min_world;
+ if (closure->max_valid > ml->max_world)
+ closure->max_valid = ml->max_world;
+ }
+ }
// a method is shadowed if type <: S <: m->sig where S is the
// signature of another applicable method
/*
@@ -2173,7 +2532,8 @@ static int ml_matches_visitor(jl_typemap_entry_t *ml, struct typemap_intersectio
// Returns a match as an array of svec(argtypes, static_params, Method).
// See below for the meaning of lim.
static jl_value_t *ml_matches(union jl_typemap_t defs, int offs,
- jl_tupletype_t *type, int lim, int include_ambiguous)
+ jl_tupletype_t *type, int lim, int include_ambiguous,
+ size_t world, size_t *min_valid, size_t *max_valid)
{
size_t l = jl_svec_len(type->parameters);
jl_value_t *va = NULL;
@@ -2194,31 +2554,17 @@ static jl_value_t *ml_matches(union jl_typemap_t defs, int offs,
env.matc = NULL;
env.lim = lim;
env.include_ambiguous = include_ambiguous;
+ env.world = world;
+ env.min_valid = *min_valid;
+ env.max_valid = *max_valid;
JL_GC_PUSH4(&env.t, &env.matc, &env.match.env, &env.match.ti);
jl_typemap_intersection_visitor(defs, offs, &env.match);
JL_GC_POP();
+ *min_valid = env.min_valid;
+ *max_valid = env.max_valid;
return env.t;
}
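
ml_matches now reports, alongside the match list, the widest world interval over which that answer stays correct: entries outside the query world shrink the interval from whichever side they sit on, and matching entries clamp it to their own bounds. A sketch of that narrowing pass (entry_t and narrow_valid are illustrative names):

#include <stddef.h>
#include <stdio.h>

typedef struct { size_t min_world, max_world; } entry_t;

/* Narrow [*min_valid, *max_valid] the way the match visitor does;
 * returns 1 if the entry participates in the match for `world`. */
static int narrow_valid(const entry_t *e, size_t world, size_t *min_valid, size_t *max_valid)
{
    if (world < e->min_world) {          /* entry belongs to a later world */
        if (*max_valid >= e->min_world)
            *max_valid = e->min_world - 1;
        return 0;
    }
    if (world > e->max_world) {          /* entry was already replaced */
        if (*min_valid <= e->max_world)
            *min_valid = e->max_world + 1;
        return 0;
    }
    if (*min_valid < e->min_world)       /* entry matches: clamp to its bounds */
        *min_valid = e->min_world;
    if (*max_valid > e->max_world)
        *max_valid = e->max_world;
    return 1;
}

int main(void)
{
    entry_t entries[] = { {1, 9}, {10, ~(size_t)0}, {12, ~(size_t)0} };
    size_t min_valid = 0, max_valid = ~(size_t)0;
    size_t world = 10;
    for (size_t i = 0; i < 3; i++)
        if (narrow_valid(&entries[i], world, &min_valid, &max_valid))
            printf("entry %zu matches\n", i);
    printf("answer valid for worlds [%zu, %zu]\n", min_valid, max_valid);
    return 0;
}
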
-// return a Vector{Any} of svecs, each describing a method match:
-// Any[svec(tt, spvals, m), ...]
-// tt is the intersection of the type argument and the method signature,
-// spvals is any matched static parameter values, m is the Method,
-//
-// lim is the max # of methods to return. if there are more, returns jl_false.
-// -1 for no limit.
-JL_DLLEXPORT jl_value_t *jl_matching_methods(jl_tupletype_t *types, int lim, int include_ambiguous)
-{
- assert(jl_nparams(types) > 0);
- if (jl_tparam0(types) == jl_bottom_type)
- return (jl_value_t*)jl_alloc_vec_any(0);
- assert(jl_is_datatype(jl_tparam0(types)));
- jl_methtable_t *mt = ((jl_datatype_t*)jl_tparam0(types))->name->mt;
- if (mt == NULL)
- return (jl_value_t*)jl_alloc_vec_any(0);
- return ml_matches(mt->defs, 0, types, lim, include_ambiguous);
-}
-
// TODO: separate the codegen and typeinf locks
// currently using a coarser lock seems like
// the best way to avoid acquisition priority
diff --git a/src/init.c b/src/init.c
index bffa565d11b60..b49703c0be759 100644
--- a/src/init.c
+++ b/src/init.c
@@ -203,7 +203,10 @@ JL_DLLEXPORT void jl_atexit_hook(int exitcode)
jl_value_t *f = jl_get_global(jl_base_module, jl_symbol("_atexit"));
if (f != NULL) {
JL_TRY {
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_get_world_counter();
jl_apply(&f, 1);
+ jl_get_ptls_states()->world_age = last_age;
}
JL_CATCH {
jl_printf(JL_STDERR, "\natexit hook threw an error: ");
diff --git a/src/interpreter.c b/src/interpreter.c
index d84191d90f5c3..0c3f9e7382032 100644
--- a/src/interpreter.c
+++ b/src/interpreter.c
@@ -29,7 +29,11 @@ int jl_is_toplevel_only_expr(jl_value_t *e);
jl_value_t *jl_interpret_toplevel_expr(jl_value_t *e)
{
- return eval(e, NULL);
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_world_counter;
+ jl_value_t *ret = eval(e, NULL);
+ jl_get_ptls_states()->world_age = last_age;
+ return ret;
}
jl_value_t *jl_interpret_toplevel_expr_in(jl_module_t *m, jl_value_t *e,
@@ -480,7 +484,10 @@ static jl_value_t *eval(jl_value_t *e, interpreter_state *s)
jl_value_t *jl_toplevel_eval_body(jl_array_t *stmts)
{
- return eval_body(stmts, NULL, 0, 1);
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_value_t *ret = eval_body(stmts, NULL, 0, 1);
+ jl_get_ptls_states()->world_age = last_age;
+ return ret;
}
static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, int start, int toplevel)
@@ -492,15 +499,17 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, int start,
while (1) {
if (i >= ns)
jl_error("`body` expression must terminate in `return`. Use `block` instead.");
- jl_value_t *stmt = jl_array_ptr_ref(stmts,i);
+ if (toplevel)
+ jl_get_ptls_states()->world_age = jl_world_counter;
+ jl_value_t *stmt = jl_array_ptr_ref(stmts, i);
if (jl_is_gotonode(stmt)) {
- i = jl_gotonode_label(stmt)-1;
+ i = jl_gotonode_label(stmt) - 1;
continue;
}
else if (jl_is_expr(stmt)) {
jl_sym_t *head = ((jl_expr_t*)stmt)->head;
if (head == return_sym) {
- jl_value_t *ex = jl_exprarg(stmt,0);
+ jl_value_t *ex = jl_exprarg(stmt, 0);
if (toplevel && jl_is_toplevel_only_expr(ex))
return jl_toplevel_eval(ex);
else
@@ -508,7 +517,7 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, int start,
}
else if (head == assign_sym) {
jl_value_t *sym = jl_exprarg(stmt, 0);
- jl_value_t *rhs = eval(jl_exprarg(stmt,1), s);
+ jl_value_t *rhs = eval(jl_exprarg(stmt, 1), s);
if (jl_is_ssavalue(sym)) {
ssize_t genid = ((jl_ssavalue_t*)sym)->id;
if (genid >= jl_source_nssavalues(s->src) || genid < 0)
@@ -537,9 +546,9 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, int start,
}
}
else if (head == goto_ifnot_sym) {
- jl_value_t *cond = eval(jl_exprarg(stmt,0), s);
+ jl_value_t *cond = eval(jl_exprarg(stmt, 0), s);
if (cond == jl_false) {
- i = jl_unbox_long(jl_exprarg(stmt, 1))-1;
+ i = jl_unbox_long(jl_exprarg(stmt, 1)) - 1;
continue;
}
else if (cond != jl_true) {
@@ -548,20 +557,20 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, int start,
}
else if (head == line_sym) {
if (toplevel)
- jl_lineno = jl_unbox_long(jl_exprarg(stmt,0));
+ jl_lineno = jl_unbox_long(jl_exprarg(stmt, 0));
// TODO: interpreted function line numbers
}
else if (head == enter_sym) {
jl_enter_handler(&__eh);
if (!jl_setjmp(__eh.eh_ctx,1)) {
- return eval_body(stmts, s, i+1, toplevel);
+ return eval_body(stmts, s, i + 1, toplevel);
}
else {
#ifdef _OS_WINDOWS_
if (ptls->exception_in_transit == jl_stackovf_exception)
_resetstkoflw();
#endif
- i = jl_unbox_long(jl_exprarg(stmt,0))-1;
+ i = jl_unbox_long(jl_exprarg(stmt, 0)) - 1;
continue;
}
}
@@ -582,11 +591,11 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, int start,
// TODO: interpreted function line numbers
}
else if (jl_is_newvarnode(stmt)) {
- jl_value_t *var = jl_fieldref(stmt,0);
+ jl_value_t *var = jl_fieldref(stmt, 0);
assert(jl_is_slot(var));
ssize_t n = jl_slot_number(var);
assert(n <= jl_source_nslots(s->src) && n > 0);
- s->locals[n-1] = NULL;
+ s->locals[n - 1] = NULL;
}
else {
eval(stmt, s);
@@ -652,7 +661,9 @@ jl_value_t *jl_interpret_toplevel_thunk(jl_code_info_t *src)
s.locals = locals;
s.module = ptls->current_module;
s.sparam_vals = jl_emptysvec;
+ size_t last_age = jl_get_ptls_states()->world_age;
jl_value_t *r = eval_body(stmts, &s, 0, 1);
+ jl_get_ptls_states()->world_age = last_age;
JL_GC_POP();
return r;
}
diff --git a/src/jl_uv.c b/src/jl_uv.c
index 2c9f312ae682a..6c8bbe3e90007 100644
--- a/src/jl_uv.c
+++ b/src/jl_uv.c
@@ -100,8 +100,12 @@ static void jl_uv_closeHandle(uv_handle_t *handle)
if (handle == (uv_handle_t*)JL_STDERR)
JL_STDERR = (JL_STREAM*)STDERR_FILENO;
// also let the client app do its own cleanup
- if (handle->type != UV_FILE && handle->data)
+ if (handle->type != UV_FILE && handle->data) {
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_world_counter;
jl_uv_call_close_callback((jl_value_t*)handle->data);
+ jl_get_ptls_states()->world_age = last_age;
+ }
if (handle == (uv_handle_t*)&signal_async)
return;
free(handle);
diff --git a/src/jlapi.c b/src/jlapi.c
index 7f02c82b4c7ad..06d7920d59835 100644
--- a/src/jlapi.c
+++ b/src/jlapi.c
@@ -59,7 +59,10 @@ JL_DLLEXPORT jl_value_t *jl_eval_string(const char *str)
jl_value_t *ast = jl_parse_input_line(str, strlen(str),
filename, strlen(filename));
JL_GC_PUSH1(&ast);
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_get_world_counter();
r = jl_toplevel_eval(ast);
+ jl_get_ptls_states()->world_age = last_age;
JL_GC_POP();
jl_exception_clear();
}
@@ -126,7 +129,10 @@ JL_DLLEXPORT jl_value_t *jl_call(jl_function_t *f, jl_value_t **args, int32_t na
argv[0] = (jl_value_t*)f;
     for(int i=1; i<nargs+1; i++)
         argv[i] = args[i-1];
+    size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_get_world_counter();
v = jl_apply(argv, nargs+1);
+ jl_get_ptls_states()->world_age = last_age;
JL_GC_POP();
jl_exception_clear();
}
@@ -141,7 +147,10 @@ JL_DLLEXPORT jl_value_t *jl_call0(jl_function_t *f)
jl_value_t *v;
JL_TRY {
JL_GC_PUSH1(&f);
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_get_world_counter();
v = jl_apply(&f, 1);
+ jl_get_ptls_states()->world_age = last_age;
JL_GC_POP();
jl_exception_clear();
}
@@ -158,7 +167,10 @@ JL_DLLEXPORT jl_value_t *jl_call1(jl_function_t *f, jl_value_t *a)
jl_value_t **argv;
JL_GC_PUSHARGS(argv, 2);
argv[0] = f; argv[1] = a;
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_get_world_counter();
v = jl_apply(argv, 2);
+ jl_get_ptls_states()->world_age = last_age;
JL_GC_POP();
jl_exception_clear();
}
@@ -175,7 +187,10 @@ JL_DLLEXPORT jl_value_t *jl_call2(jl_function_t *f, jl_value_t *a, jl_value_t *b
jl_value_t **argv;
JL_GC_PUSHARGS(argv, 3);
argv[0] = f; argv[1] = a; argv[2] = b;
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_get_world_counter();
v = jl_apply(argv, 3);
+ jl_get_ptls_states()->world_age = last_age;
JL_GC_POP();
jl_exception_clear();
}
@@ -193,7 +208,10 @@ JL_DLLEXPORT jl_value_t *jl_call3(jl_function_t *f, jl_value_t *a,
jl_value_t **argv;
JL_GC_PUSHARGS(argv, 4);
argv[0] = f; argv[1] = a; argv[2] = b; argv[3] = c;
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_get_world_counter();
v = jl_apply(argv, 4);
+ jl_get_ptls_states()->world_age = last_age;
JL_GC_POP();
jl_exception_clear();
}
diff --git a/src/jltypes.c b/src/jltypes.c
index e70a22a0809df..ef7a7ff0e4c2e 100644
--- a/src/jltypes.c
+++ b/src/jltypes.c
@@ -3600,12 +3600,13 @@ void jl_init_types(void)
jl_methtable_type->name->mt = jl_new_method_table(jl_methtable_type->name->name, ptls->current_module);
jl_methtable_type->super = jl_any_type;
jl_methtable_type->parameters = jl_emptysvec;
- jl_methtable_type->name->names = jl_svec(8, jl_symbol("name"), jl_symbol("defs"),
+ jl_methtable_type->name->names = jl_svec(9, jl_symbol("name"), jl_symbol("defs"),
jl_symbol("cache"), jl_symbol("max_args"),
jl_symbol("kwsorter"), jl_symbol("module"),
- jl_symbol(""), jl_symbol(""));
- jl_methtable_type->types = jl_svec(8, jl_sym_type, jl_any_type, jl_any_type, jl_any_type/*jl_long*/,
- jl_any_type, jl_any_type/*module*/, jl_any_type/*long*/, jl_any_type/*int32*/);
+ jl_symbol("backedges"), jl_symbol(""), jl_symbol(""));
+ jl_methtable_type->types = jl_svec(9, jl_sym_type, jl_any_type, jl_any_type, jl_any_type/*jl_long*/,
+ jl_any_type, jl_any_type/*module*/,
+ jl_any_type/*any vector*/, jl_any_type/*long*/, jl_any_type/*int32*/);
jl_methtable_type->uid = jl_assign_type_uid();
jl_methtable_type->instance = NULL;
jl_methtable_type->struct_decl = NULL;
@@ -3751,24 +3752,30 @@ void jl_init_types(void)
jl_typemap_entry_type =
jl_new_datatype(jl_symbol("TypeMapEntry"), jl_any_type, jl_emptysvec,
- jl_svec(9, jl_symbol("next"),
- jl_symbol("sig"),
- jl_symbol("tvars"),
- jl_symbol("simplesig"),
- jl_symbol("guardsigs"),
- jl_symbol("func"),
- jl_symbol("isleafsig"),
- jl_symbol("issimplesig"),
- jl_symbol("va")),
- jl_svec(9, jl_any_type, // Union{TypeMapEntry, Void}
- jl_type_type, // TupleType
- jl_any_type, // Union{SimpleVector{TypeVar}, TypeVar}
- jl_any_type, // TupleType
- jl_any_type, // SimpleVector{TupleType}
- jl_any_type, // Any
- jl_bool_type,
- jl_bool_type,
- jl_bool_type),
+ jl_svec(11,
+ jl_symbol("next"),
+ jl_symbol("sig"),
+ jl_symbol("tvars"),
+ jl_symbol("simplesig"),
+ jl_symbol("guardsigs"),
+ jl_symbol("min_world"),
+ jl_symbol("max_world"),
+ jl_symbol("func"),
+ jl_symbol("isleafsig"),
+ jl_symbol("issimplesig"),
+ jl_symbol("va")),
+ jl_svec(11,
+ jl_any_type, // Union{TypeMapEntry, Void}
+ jl_type_type, // TupleType
+ jl_any_type, // Union{SimpleVector{TypeVar}, TypeVar}
+ jl_any_type, // TupleType
+ jl_any_type, // SimpleVector{TupleType}
+ jl_long_type, // Int
+ jl_long_type, // Int
+ jl_any_type, // Any
+ jl_bool_type,
+ jl_bool_type,
+ jl_bool_type),
0, 1, 5);
jl_function_type = jl_new_abstracttype((jl_value_t*)jl_symbol("Function"), jl_any_type, jl_emptysvec);
@@ -3854,9 +3861,6 @@ void jl_init_types(void)
jl_svec(2, jl_symbol("mod"), jl_symbol("name")),
jl_svec(2, jl_module_type, jl_sym_type), 0, 0, 2);
- jl_svecset(jl_typename_type->types, 1, jl_module_type);
- jl_svecset(jl_methtable_type->types, 5, jl_module_type);
-
jl_code_info_type =
jl_new_datatype(jl_symbol("CodeInfo"),
jl_any_type, jl_emptysvec,
@@ -3885,13 +3889,15 @@ void jl_init_types(void)
jl_method_type =
jl_new_datatype(jl_symbol("Method"),
jl_any_type, jl_emptysvec,
- jl_svec(18,
+ jl_svec(20,
jl_symbol("name"),
jl_symbol("module"),
jl_symbol("file"),
jl_symbol("line"),
jl_symbol("sig"),
jl_symbol("tvars"),
+ jl_symbol("min_world"),
+ jl_symbol("max_world"),
jl_symbol("ambig"),
jl_symbol("specializations"),
jl_symbol("sparam_syms"),
@@ -3904,15 +3910,17 @@ void jl_init_types(void)
jl_symbol("isva"),
jl_symbol("isstaged"),
jl_symbol("needs_sparam_vals_ducttape")),
- jl_svec(18,
+ jl_svec(20,
jl_sym_type,
jl_module_type,
jl_sym_type,
jl_int32_type,
jl_type_type,
- jl_any_type,
+ jl_any_type, // Union{TypeVar, SimpleVector}
+ jl_long_type,
+ jl_long_type,
jl_any_type, // Union{Array, Void}
- jl_any_type,
+ jl_any_type, // TypeMap
jl_simplevector_type,
jl_code_info_type,
jl_any_type, // jl_method_instance_type
@@ -3923,31 +3931,37 @@ void jl_init_types(void)
jl_bool_type,
jl_bool_type,
jl_bool_type),
- 0, 1, 9);
+ 0, 1, 11);
jl_method_instance_type =
jl_new_datatype(jl_symbol("MethodInstance"),
jl_any_type, jl_emptysvec,
- jl_svec(13,
+ jl_svec(16,
jl_symbol("specTypes"),
jl_symbol("rettype"),
jl_symbol("sparam_vals"),
+ jl_symbol("backedges"),
jl_symbol("inferred"),
jl_symbol("inferred_const"),
jl_symbol("def"),
+ jl_symbol("min_world"),
+ jl_symbol("max_world"),
jl_symbol("inInference"),
jl_symbol("jlcall_api"),
jl_symbol(""),
jl_symbol("fptr"),
jl_symbol("unspecialized_ducttape"),
jl_symbol(""), jl_symbol("")),
- jl_svec(13,
+ jl_svec(16,
jl_any_type,
jl_any_type,
jl_simplevector_type,
jl_any_type,
jl_any_type,
+ jl_any_type,
jl_method_type,
+ jl_long_type,
+ jl_long_type,
jl_bool_type,
jl_uint8_type,
jl_bool_type,
@@ -4007,19 +4021,22 @@ void jl_init_types(void)
jl_svecset(jl_datatype_type->types, 16, jl_bool_type);
jl_svecset(jl_tvar_type->types, 3, jl_bool_type);
jl_svecset(jl_simplevector_type->types, 0, jl_long_type);
+ jl_svecset(jl_typename_type->types, 1, jl_module_type);
jl_svecset(jl_typename_type->types, 6, jl_long_type);
jl_svecset(jl_methtable_type->types, 3, jl_long_type);
+ jl_svecset(jl_methtable_type->types, 5, jl_module_type);
+ jl_svecset(jl_methtable_type->types, 6, jl_array_any_type);
#ifdef __LP64__
- jl_svecset(jl_methtable_type->types, 6, jl_int64_type); // unsigned long
+ jl_svecset(jl_methtable_type->types, 7, jl_int64_type); // unsigned long
#else
- jl_svecset(jl_methtable_type->types, 6, jl_int32_type); // DWORD
+ jl_svecset(jl_methtable_type->types, 7, jl_int32_type); // DWORD
#endif
- jl_svecset(jl_methtable_type->types, 7, jl_int32_type); // uint32_t
- jl_svecset(jl_method_type->types, 10, jl_method_instance_type);
- jl_svecset(jl_method_instance_type->types, 9, jl_voidpointer_type);
- jl_svecset(jl_method_instance_type->types, 10, jl_voidpointer_type);
- jl_svecset(jl_method_instance_type->types, 11, jl_voidpointer_type);
+ jl_svecset(jl_methtable_type->types, 8, jl_int32_type); // uint32_t
+ jl_svecset(jl_method_type->types, 12, jl_method_instance_type);
jl_svecset(jl_method_instance_type->types, 12, jl_voidpointer_type);
+ jl_svecset(jl_method_instance_type->types, 13, jl_voidpointer_type);
+ jl_svecset(jl_method_instance_type->types, 14, jl_voidpointer_type);
+ jl_svecset(jl_method_instance_type->types, 15, jl_voidpointer_type);
jl_compute_field_offsets(jl_datatype_type);
jl_compute_field_offsets(jl_typename_type);
diff --git a/src/julia.h b/src/julia.h
index c767ae4e094e2..1e85fb092f11a 100644
--- a/src/julia.h
+++ b/src/julia.h
@@ -230,8 +230,11 @@ typedef struct _jl_method_t {
// method's type signature. redundant with TypeMapEntry->specTypes
jl_tupletype_t *sig;
- // bound type variables (static parameters). redundant with TypeMapEntry->tvars
+ // bound type variables (static parameters)
jl_svec_t *tvars;
+ size_t min_world;
+ size_t max_world;
+
// list of potentially-ambiguous methods (nothing = none, Vector{Any} of Methods otherwise)
jl_value_t *ambig;
@@ -272,9 +275,12 @@ typedef struct _jl_method_instance_t {
jl_tupletype_t *specTypes; // argument types this was specialized for
jl_value_t *rettype; // return type for fptr
jl_svec_t *sparam_vals; // the values for the tvars, indexed by def->sparam_syms
+ jl_array_t *backedges;
jl_value_t *inferred; // inferred jl_code_info_t, or value of the function if jlcall_api == 2, or null
jl_value_t *inferred_const; // inferred constant return value, or null
jl_method_t *def; // method this is specialized from, null if this is a toplevel thunk
+ size_t min_world;
+ size_t max_world;
uint8_t inInference; // flags to tell if inference is running on this function
uint8_t jlcall_api; // the c-abi for fptr; 0 = jl_fptr_t, 1 = jl_fptr_sparam_t, 2 = constval
uint8_t compile_traced; // if set will notify callback if this linfo is compiled
@@ -423,6 +429,8 @@ typedef struct _jl_typemap_entry_t {
jl_svec_t *tvars; // the bound type variables for sig
jl_tupletype_t *simplesig; // a simple signature for fast rejection
jl_svec_t *guardsigs;
+ size_t min_world;
+ size_t max_world;
union {
jl_value_t *value;
jl_method_instance_t *linfo; // [nullable] for guard entries
@@ -458,6 +466,7 @@ typedef struct _jl_methtable_t {
intptr_t max_args; // max # of non-vararg arguments in a signature
jl_value_t *kwsorter; // keyword argument sorter function
jl_module_t *module; // used for incremental serialization to locate original binding
+ jl_array_t *backedges;
jl_mutex_t writelock;
} jl_methtable_t;
@@ -1016,7 +1025,7 @@ JL_DLLEXPORT jl_svec_t *jl_svec_fill(size_t n, jl_value_t *x);
JL_DLLEXPORT jl_value_t *jl_tupletype_fill(size_t n, jl_value_t *v);
JL_DLLEXPORT jl_sym_t *jl_symbol(const char *str);
JL_DLLEXPORT jl_sym_t *jl_symbol_lookup(const char *str);
-JL_DLLEXPORT jl_sym_t *jl_symbol_n(const char *str, int32_t len);
+JL_DLLEXPORT jl_sym_t *jl_symbol_n(const char *str, size_t len);
JL_DLLEXPORT jl_sym_t *jl_gensym(void);
JL_DLLEXPORT jl_sym_t *jl_tagged_gensym(const char *str, int32_t len);
JL_DLLEXPORT jl_sym_t *jl_get_root_symbol(void);
@@ -1026,6 +1035,7 @@ JL_DLLEXPORT jl_value_t *jl_generic_function_def(jl_sym_t *name, jl_value_t **bp
JL_DLLEXPORT void jl_method_def(jl_svec_t *argdata, jl_code_info_t *f, jl_value_t *isstaged);
JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *linfo);
JL_DLLEXPORT jl_code_info_t *jl_copy_code_info(jl_code_info_t *src);
+JL_DLLEXPORT size_t jl_get_world_counter(void);
JL_DLLEXPORT jl_function_t *jl_get_kwsorter(jl_typename_t *tn);
JL_DLLEXPORT jl_value_t *jl_box_bool(int8_t x);
JL_DLLEXPORT jl_value_t *jl_box_int8(int8_t x);
@@ -1409,6 +1419,7 @@ typedef struct _jl_handler_t {
sig_atomic_t defer_signal;
int finalizers_inhibited;
jl_timing_block_t *timing_stack;
+ size_t world_age;
} jl_handler_t;
typedef struct _jl_task_t {
@@ -1426,6 +1437,7 @@ typedef struct _jl_task_t {
size_t bufsz;
void *stkbuf;
+// hidden fields:
size_t ssize;
size_t started:1;
@@ -1435,6 +1447,8 @@ typedef struct _jl_task_t {
jl_gcframe_t *gcstack;
// current module, or NULL if this task has not set one
jl_module_t *current_module;
+ // current world age
+ size_t world_age;
// id of owning thread
// does not need to be defined until the task runs
@@ -1506,6 +1520,7 @@ STATIC_INLINE void jl_eh_restore_state(jl_handler_t *eh)
locks->len = eh->locks_len;
}
#endif
+ ptls->world_age = eh->world_age;
ptls->defer_signal = eh->defer_signal;
ptls->gc_state = eh->gc_state;
ptls->finalizers_inhibited = eh->finalizers_inhibited;
diff --git a/src/julia_internal.h b/src/julia_internal.h
index 368331f0da74d..f6323daac6544 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -39,12 +39,14 @@ extern "C" {
// useful constants
extern jl_methtable_t *jl_type_type_mt;
+extern size_t jl_world_counter;
typedef void (*tracer_cb)(jl_value_t *tracee);
void jl_call_tracer(tracer_cb callback, jl_value_t *tracee);
extern size_t jl_page_size;
extern jl_function_t *jl_typeinf_func;
+extern size_t jl_typeinf_world;
JL_DLLEXPORT extern int jl_lineno;
JL_DLLEXPORT extern const char *jl_filename;
@@ -190,10 +192,10 @@ STATIC_INLINE void *jl_gc_alloc_buf(jl_ptls_t ptls, size_t sz)
return jl_gc_alloc(ptls, sz, (void*)jl_buff_tag);
}
-jl_code_info_t *jl_type_infer(jl_method_instance_t *li, int force);
-jl_generic_fptr_t jl_generate_fptr(jl_method_instance_t *li, void *F);
-jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t *li, jl_code_info_t *src, const jl_cgparams_t *params);
-jl_llvm_functions_t jl_compile_for_dispatch(jl_method_instance_t *li);
+jl_code_info_t *jl_type_infer(jl_method_instance_t **li, size_t world, int force);
+jl_generic_fptr_t jl_generate_fptr(jl_method_instance_t *li, void *F, size_t world);
+jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t **pli, jl_code_info_t *src, size_t world, const jl_cgparams_t *params);
+jl_llvm_functions_t jl_compile_for_dispatch(jl_method_instance_t **li, size_t world);
JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types);
jl_code_info_t *jl_new_code_info_from_ast(jl_expr_t *ast);
jl_method_t *jl_new_method(jl_code_info_t *definition,
@@ -213,10 +215,11 @@ STATIC_INLINE jl_value_t *jl_call_method_internal(jl_method_instance_t *meth, jl
if (fptr.jlcall_api == 2)
return meth->inferred;
if (__unlikely(fptr.fptr == NULL || fptr.jlcall_api == 0)) {
+ size_t world = jl_get_ptls_states()->world_age;
// first see if it likely needs to be compiled
void *F = meth->functionObjectsDecls.functionObject;
if (!F) // ask codegen to try to turn it into llvm code
- F = jl_compile_for_dispatch(meth).functionObject;
+ F = jl_compile_for_dispatch(&meth, world).functionObject;
if (meth->jlcall_api == 2)
return meth->inferred;
// if it hasn't been inferred, try using the unspecialized meth cache instead
@@ -234,7 +237,7 @@ STATIC_INLINE jl_value_t *jl_call_method_internal(jl_method_instance_t *meth, jl
}
if (!fptr.fptr || fptr.jlcall_api == 0) {
// ask codegen to make the fptr
- fptr = jl_generate_fptr(meth, F);
+ fptr = jl_generate_fptr(meth, F, world);
if (fptr.jlcall_api == 2)
return meth->inferred;
}
@@ -290,8 +293,8 @@ void jl_set_t_uid_ctr(int i);
uint32_t jl_get_gs_ctr(void);
void jl_set_gs_ctr(uint32_t ctr);
-void JL_NORETURN jl_method_error_bare(jl_function_t *f, jl_value_t *args);
-void JL_NORETURN jl_method_error(jl_function_t *f, jl_value_t **args, size_t na);
+void JL_NORETURN jl_method_error_bare(jl_function_t *f, jl_value_t *args, size_t world);
+void JL_NORETURN jl_method_error(jl_function_t *f, jl_value_t **args, size_t na, size_t world);
jl_value_t *jl_get_exceptionf(jl_datatype_t *exception_type, const char *fmt, ...);
JL_DLLEXPORT void jl_typeassert(jl_value_t *x, jl_value_t *t);
@@ -363,11 +366,11 @@ int jl_is_toplevel_only_expr(jl_value_t *e);
jl_value_t *jl_call_scm_on_ast(const char *funcname, jl_value_t *expr);
jl_method_instance_t *jl_method_lookup_by_type(jl_methtable_t *mt, jl_tupletype_t *types,
- int cache, int inexact, int allow_exec);
-jl_method_instance_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t nargs, int cache);
+ int cache, int inexact, int allow_exec, size_t world);
+jl_method_instance_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t nargs, int cache, size_t world);
jl_value_t *jl_gf_invoke(jl_tupletype_t *types, jl_value_t **args, size_t nargs);
-jl_datatype_t *jl_first_argument_datatype(jl_value_t *argtypes);
+JL_DLLEXPORT jl_datatype_t *jl_first_argument_datatype(jl_value_t *argtypes);
int jl_has_intrinsics(jl_method_instance_t *li, jl_value_t *v, jl_module_t *m);
jl_value_t *jl_nth_slot_type(jl_tupletype_t *sig, size_t i);
@@ -479,11 +482,13 @@ int32_t jl_jlcall_api(/*llvm::Function*/const void *function);
JL_DLLEXPORT jl_array_t *jl_idtable_rehash(jl_array_t *a, size_t newsz);
JL_DLLEXPORT jl_methtable_t *jl_new_method_table(jl_sym_t *name, jl_module_t *module);
-jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types);
+jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types, size_t world);
JL_DLLEXPORT int jl_has_call_ambiguities(jl_tupletype_t *types, jl_method_t *m);
jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_tupletype_t *types, jl_svec_t *sp);
-JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt, jl_tupletype_t *type);
-JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m, jl_tupletype_t *type, jl_svec_t *sparams);
+JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt, jl_tupletype_t *type, size_t world);
+JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m, jl_tupletype_t *type, jl_svec_t *sparams, size_t world);
+JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_method_instance_t *caller);
+JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_value_t *caller);
uint32_t jl_module_next_counter(jl_module_t *m);
void jl_fptr_to_llvm(jl_fptr_t fptr, jl_method_instance_t *lam, int specsig);
@@ -764,21 +769,22 @@ jl_typemap_entry_t *jl_typemap_insert(union jl_typemap_t *cache, jl_value_t *par
jl_tupletype_t *simpletype, jl_svec_t *guardsigs,
jl_value_t *newvalue, int8_t offs,
const struct jl_typemap_info *tparams,
+ size_t min_world, size_t max_world,
jl_value_t **overwritten);
jl_typemap_entry_t *jl_typemap_assoc_by_type(union jl_typemap_t ml_or_cache, jl_tupletype_t *types, jl_svec_t **penv,
- int8_t subtype_inexact__sigseq_useenv, int8_t subtype, int8_t offs);
+ int8_t subtype_inexact__sigseq_useenv, int8_t subtype, int8_t offs, size_t world);
static jl_typemap_entry_t *const INEXACT_ENTRY = (jl_typemap_entry_t*)(uintptr_t)-1;
-jl_typemap_entry_t *jl_typemap_level_assoc_exact(jl_typemap_level_t *cache, jl_value_t **args, size_t n, int8_t offs);
-jl_typemap_entry_t *jl_typemap_entry_assoc_exact(jl_typemap_entry_t *mn, jl_value_t **args, size_t n);
-STATIC_INLINE jl_typemap_entry_t *jl_typemap_assoc_exact(union jl_typemap_t ml_or_cache, jl_value_t **args, size_t n, int8_t offs)
+jl_typemap_entry_t *jl_typemap_level_assoc_exact(jl_typemap_level_t *cache, jl_value_t **args, size_t n, int8_t offs, size_t world);
+jl_typemap_entry_t *jl_typemap_entry_assoc_exact(jl_typemap_entry_t *mn, jl_value_t **args, size_t n, size_t world);
+STATIC_INLINE jl_typemap_entry_t *jl_typemap_assoc_exact(union jl_typemap_t ml_or_cache, jl_value_t **args, size_t n, int8_t offs, size_t world)
{
// NOTE: This function is a huge performance hot spot!!
if (jl_typeof(ml_or_cache.unknown) == (jl_value_t*)jl_typemap_entry_type) {
- return jl_typemap_entry_assoc_exact(ml_or_cache.leaf, args, n);
+ return jl_typemap_entry_assoc_exact(ml_or_cache.leaf, args, n, world);
}
else if (jl_typeof(ml_or_cache.unknown) == (jl_value_t*)jl_typemap_level_type) {
- return jl_typemap_level_assoc_exact(ml_or_cache.node, args, n, offs);
+ return jl_typemap_level_assoc_exact(ml_or_cache.node, args, n, offs, world);
}
return NULL;
}
diff --git a/src/julia_threads.h b/src/julia_threads.h
index 1b5eb5ad11856..0b496d9ac20b9 100644
--- a/src/julia_threads.h
+++ b/src/julia_threads.h
@@ -68,6 +68,7 @@ typedef struct {
#define JL_MAX_BT_SIZE 80000
typedef struct _jl_tls_states_t {
struct _jl_gcframe_t *pgcstack;
+ size_t world_age;
struct _jl_value_t *exception_in_transit;
volatile size_t *safepoint;
// Whether it is safe to execute GC at the same time.
diff --git a/src/options.h b/src/options.h
index c48f729b6865e..93d1f8fb76939 100644
--- a/src/options.h
+++ b/src/options.h
@@ -27,6 +27,11 @@
// delete julia IR for non-inlineable functions after they're codegen'd
#define JL_DELETE_NON_INLINEABLE 1
+// fill in the jl_all_methods in world-counter order
+// so that it is possible to map (in a debugger) from
+// an inferred world validity range back to the offending definition
+// #define RECORD_METHOD_ORDER
+
// GC options -----------------------------------------------------------------
// debugging options
diff --git a/src/task.c b/src/task.c
index da456112945a8..c89fbaca3983a 100644
--- a/src/task.c
+++ b/src/task.c
@@ -257,6 +257,7 @@ static void NOINLINE JL_NORETURN start_task(void)
jl_sigint_safepoint(ptls);
}
JL_TIMING(ROOT);
+ ptls->world_age = jl_world_counter;
res = jl_apply(&t->start, 1);
}
JL_CATCH {
@@ -265,6 +266,7 @@ static void NOINLINE JL_NORETURN start_task(void)
jl_gc_wb(t, res);
}
}
+ jl_get_ptls_states()->world_age = jl_world_counter; // TODO
finish_task(t, res);
gc_debug_critical_error();
abort();
@@ -306,14 +308,16 @@ static void ctx_switch(jl_ptls_t ptls, jl_task_t *t, jl_jmp_buf *where)
if (!jl_setjmp(ptls->current_task->ctx, 0)) {
// backtraces don't survive task switches, see e.g. issue #12485
ptls->bt_size = 0;
-#ifdef COPY_STACKS
jl_task_t *lastt = ptls->current_task;
+#ifdef COPY_STACKS
save_stack(ptls, lastt);
#endif
// set up global state for new task
- ptls->current_task->gcstack = ptls->pgcstack;
+ lastt->gcstack = ptls->pgcstack;
+ lastt->world_age = ptls->world_age;
ptls->pgcstack = t->gcstack;
+ ptls->world_age = t->world_age;
#ifdef JULIA_ENABLE_THREADING
// If the current task is not holding any locks, free the locks list
// so that it can be GC'd without leaking memory
diff --git a/src/toplevel.c b/src/toplevel.c
index 700a288569be5..2f0b85f2b8aac 100644
--- a/src/toplevel.c
+++ b/src/toplevel.c
@@ -73,14 +73,17 @@ static jl_function_t *jl_module_get_initializer(jl_module_t *m)
return (jl_function_t*)jl_get_global(m, jl_symbol("__init__"));
}
+
void jl_module_run_initializer(jl_module_t *m)
{
- jl_ptls_t ptls = jl_get_ptls_states();
jl_function_t *f = jl_module_get_initializer(m);
if (f == NULL)
return;
+ size_t last_age = jl_get_ptls_states()->world_age;
JL_TRY {
+ jl_get_ptls_states()->world_age = jl_world_counter;
jl_apply(&f, 1);
+ jl_get_ptls_states()->world_age = last_age;
}
JL_CATCH {
if (jl_initerror_type == NULL) {
@@ -88,12 +91,11 @@ void jl_module_run_initializer(jl_module_t *m)
}
else {
jl_rethrow_other(jl_new_struct(jl_initerror_type, m->name,
- ptls->exception_in_transit));
+ jl_exception_in_transit));
}
}
}
-
// load time init procedure: in build mode, only record order
static void jl_module_load_time_initialize(jl_module_t *m)
{
@@ -174,25 +176,30 @@ jl_value_t *jl_eval_module_expr(jl_expr_t *ex)
jl_value_t *defaultdefs = NULL, *form = NULL;
JL_GC_PUSH3(&last_module, &defaultdefs, &form);
+ size_t last_age = ptls->world_age;
jl_module_t *task_last_m = ptls->current_task->current_module;
ptls->current_task->current_module = ptls->current_module = newm;
+
jl_module_t *prev_outermost = outermost;
size_t stackidx = module_stack.len;
if (outermost == NULL)
outermost = newm;
- if (std_imports) {
- // add `eval` function
- defaultdefs = jl_call_scm_on_ast("module-default-defs", (jl_value_t*)ex);
- jl_toplevel_eval_flex(defaultdefs, 0, 1);
- defaultdefs = NULL;
- }
-
jl_array_t *exprs = ((jl_expr_t*)jl_exprarg(ex, 2))->args;
JL_TRY {
- for(int i=0; i < jl_array_len(exprs); i++) {
+ if (std_imports) {
+ // add `eval` function
+ defaultdefs = jl_call_scm_on_ast("module-default-defs", (jl_value_t*)ex);
+ ptls->world_age = jl_world_counter;
+ jl_toplevel_eval_flex(defaultdefs, 0, 1);
+ defaultdefs = NULL;
+ }
+
+ for (int i = 0; i < jl_array_len(exprs); i++) {
// process toplevel form
+ ptls->world_age = jl_world_counter;
form = jl_expand(jl_array_ptr_ref(exprs, i));
+ ptls->world_age = jl_world_counter;
(void)jl_toplevel_eval_flex(form, 1, 1);
}
}
@@ -204,6 +211,7 @@ jl_value_t *jl_eval_module_expr(jl_expr_t *ex)
jl_rethrow();
}
JL_GC_POP();
+ ptls->world_age = last_age;
ptls->current_module = last_module;
ptls->current_task->current_module = task_last_m;
outermost = prev_outermost;
@@ -233,8 +241,8 @@ jl_value_t *jl_eval_module_expr(jl_expr_t *ex)
if (outermost == NULL || ptls->current_module == jl_main_module) {
JL_TRY {
- size_t i, l=module_stack.len;
- for(i = stackidx; i < l; i++) {
+ size_t i, l = module_stack.len;
+ for (i = stackidx; i < l; i++) {
jl_module_load_time_initialize((jl_module_t*)module_stack.items[i]);
}
assert(module_stack.len == l);
@@ -634,7 +642,8 @@ jl_value_t *jl_toplevel_eval_flex(jl_value_t *e, int fast, int expanded)
if (ewc) {
li = jl_new_thunk(thk);
- jl_type_infer(li, 0);
+ size_t world = jl_get_ptls_states()->world_age;
+ jl_type_infer(&li, world, 0);
jl_value_t *dummy_f_arg = NULL;
result = jl_call_method_internal(li, &dummy_f_arg, 1);
}
@@ -791,7 +800,7 @@ static jl_datatype_t *first_arg_datatype(jl_value_t *a, int got_tuple1)
}
// get DataType of first tuple element, or NULL if cannot be determined
-jl_datatype_t *jl_first_argument_datatype(jl_value_t *argtypes)
+JL_DLLEXPORT jl_datatype_t *jl_first_argument_datatype(jl_value_t *argtypes)
{
return first_arg_datatype(argtypes, 0);
}
diff --git a/src/typemap.c b/src/typemap.c
index 55fbd9f981379..e4d911c1881a8 100644
--- a/src/typemap.c
+++ b/src/typemap.c
@@ -596,11 +596,14 @@ int sigs_eq(jl_value_t *a, jl_value_t *b, int useenv)
there tends to be lots of variation there. The type of the 0th argument
(the function) is always the same for most functions.
*/
-static jl_typemap_entry_t *jl_typemap_assoc_by_type_(jl_typemap_entry_t *ml, jl_tupletype_t *types, int8_t inexact, jl_svec_t **penv)
+static jl_typemap_entry_t *jl_typemap_assoc_by_type_(jl_typemap_entry_t *ml, jl_tupletype_t *types,
+ int8_t inexact, jl_svec_t **penv, size_t world)
{
size_t n = jl_field_count(types);
int typesisva = n == 0 ? 0 : jl_is_vararg_type(jl_tparam(types, n-1));
- while (ml != (void*)jl_nothing) {
+ for (; ml != (void*)jl_nothing; ml = ml->next) {
+ if (world < ml->min_world || world > ml->max_world)
+ continue; // ignore replaced methods
size_t lensig = jl_field_count(ml->sig);
if (lensig == n || (ml->va && lensig <= n+1)) {
int resetenv = 0, ismatch = 1;
@@ -681,19 +684,19 @@ static jl_typemap_entry_t *jl_typemap_assoc_by_type_(jl_typemap_entry_t *ml, jl_
if (resetenv)
*penv = jl_emptysvec;
}
- ml = ml->next;
}
return NULL;
}
-static jl_typemap_entry_t *jl_typemap_lookup_by_type_(jl_typemap_entry_t *ml, jl_tupletype_t *types, int8_t useenv)
+static jl_typemap_entry_t *jl_typemap_lookup_by_type_(jl_typemap_entry_t *ml, jl_tupletype_t *types, int8_t useenv, size_t world)
{
- while (ml != (void*)jl_nothing) {
+ for (; ml != (void*)jl_nothing; ml = ml->next) {
+ if (world < ml->min_world || world > ml->max_world)
+ continue;
// TODO: more efficient
if (sigs_eq((jl_value_t*)types, (jl_value_t*)ml->sig, useenv)) {
return ml;
}
- ml = ml->next;
}
return NULL;
}
@@ -702,7 +705,7 @@ static jl_typemap_entry_t *jl_typemap_lookup_by_type_(jl_typemap_entry_t *ml, jl
// this is the general entry point for looking up a type in the cache
// (as a subtype, or with typeseq)
jl_typemap_entry_t *jl_typemap_assoc_by_type(union jl_typemap_t ml_or_cache, jl_tupletype_t *types, jl_svec_t **penv,
- int8_t subtype_inexact__sigseq_useenv, int8_t subtype, int8_t offs)
+ int8_t subtype_inexact__sigseq_useenv, int8_t subtype, int8_t offs, size_t world)
{
if (jl_typeof(ml_or_cache.unknown) == (jl_value_t*)jl_typemap_level_type) {
jl_typemap_level_t *cache = ml_or_cache.node;
@@ -727,7 +730,7 @@ jl_typemap_entry_t *jl_typemap_assoc_by_type(union jl_typemap_t ml_or_cache, jl_
// If there is a type at offs, look in the optimized caches
if (!subtype) {
if (ty && jl_is_any(ty))
- return jl_typemap_assoc_by_type(cache->any, types, penv, subtype_inexact__sigseq_useenv, subtype, offs+1);
+ return jl_typemap_assoc_by_type(cache->any, types, penv, subtype_inexact__sigseq_useenv, subtype, offs+1, world);
if (isva) // in lookup mode, want to match Vararg exactly, not as a subtype
ty = NULL;
}
@@ -738,7 +741,7 @@ jl_typemap_entry_t *jl_typemap_assoc_by_type(union jl_typemap_t ml_or_cache, jl_
union jl_typemap_t ml = mtcache_hash_lookup(&cache->targ, a0, 1, offs);
if (ml.unknown != jl_nothing) {
jl_typemap_entry_t *li = jl_typemap_assoc_by_type(ml, types, penv,
- subtype_inexact__sigseq_useenv, subtype, offs+1);
+ subtype_inexact__sigseq_useenv, subtype, offs+1, world);
if (li) return li;
}
}
@@ -748,7 +751,7 @@ jl_typemap_entry_t *jl_typemap_assoc_by_type(union jl_typemap_t ml_or_cache, jl_
union jl_typemap_t ml = mtcache_hash_lookup(&cache->arg1, ty, 0, offs);
if (ml.unknown != jl_nothing) {
jl_typemap_entry_t *li = jl_typemap_assoc_by_type(ml, types, penv,
- subtype_inexact__sigseq_useenv, subtype, offs+1);
+ subtype_inexact__sigseq_useenv, subtype, offs+1, world);
if (li) return li;
}
}
@@ -756,41 +759,43 @@ jl_typemap_entry_t *jl_typemap_assoc_by_type(union jl_typemap_t ml_or_cache, jl_
}
// Always check the list (since offs doesn't always start at 0)
if (subtype) {
- jl_typemap_entry_t *li = jl_typemap_assoc_by_type_(cache->linear, types, subtype_inexact__sigseq_useenv, penv);
+ jl_typemap_entry_t *li = jl_typemap_assoc_by_type_(cache->linear, types, subtype_inexact__sigseq_useenv, penv, world);
if (li) return li;
- return jl_typemap_assoc_by_type(cache->any, types, penv, subtype_inexact__sigseq_useenv, subtype, offs+1);
+ return jl_typemap_assoc_by_type(cache->any, types, penv, subtype_inexact__sigseq_useenv, subtype, offs+1, world);
}
else {
- return jl_typemap_lookup_by_type_(cache->linear, types, subtype_inexact__sigseq_useenv);
+ return jl_typemap_lookup_by_type_(cache->linear, types, subtype_inexact__sigseq_useenv, world);
}
}
else {
return subtype ?
- jl_typemap_assoc_by_type_(ml_or_cache.leaf, types, subtype_inexact__sigseq_useenv, penv) :
- jl_typemap_lookup_by_type_(ml_or_cache.leaf, types, subtype_inexact__sigseq_useenv);
+ jl_typemap_assoc_by_type_(ml_or_cache.leaf, types, subtype_inexact__sigseq_useenv, penv, world) :
+ jl_typemap_lookup_by_type_(ml_or_cache.leaf, types, subtype_inexact__sigseq_useenv, world);
}
}
-jl_typemap_entry_t *jl_typemap_entry_assoc_exact(jl_typemap_entry_t *ml, jl_value_t **args, size_t n)
+jl_typemap_entry_t *jl_typemap_entry_assoc_exact(jl_typemap_entry_t *ml, jl_value_t **args, size_t n, size_t world)
{
// some manually-unrolled common special cases
while (ml->simplesig == (void*)jl_nothing && ml->guardsigs == jl_emptysvec && ml->isleafsig) {
         // use a tight loop for as long as possible
- if (n == jl_field_count(ml->sig) && jl_typeof(args[0]) == jl_tparam(ml->sig, 0)) {
- if (n == 1)
- return ml;
- if (n == 2) {
- if (jl_typeof(args[1]) == jl_tparam(ml->sig, 1))
- return ml;
- }
- else if (n == 3) {
- if (jl_typeof(args[1]) == jl_tparam(ml->sig, 1) &&
- jl_typeof(args[2]) == jl_tparam(ml->sig, 2))
- return ml;
- }
- else {
- if (sig_match_leaf(args, jl_svec_data(ml->sig->parameters), n))
+ if (world >= ml->min_world && world <= ml->max_world) {
+ if (n == jl_field_count(ml->sig) && jl_typeof(args[0]) == jl_tparam(ml->sig, 0)) {
+ if (n == 1)
return ml;
+ if (n == 2) {
+ if (jl_typeof(args[1]) == jl_tparam(ml->sig, 1))
+ return ml;
+ }
+ else if (n == 3) {
+ if (jl_typeof(args[1]) == jl_tparam(ml->sig, 1) &&
+ jl_typeof(args[2]) == jl_tparam(ml->sig, 2))
+ return ml;
+ }
+ else {
+ if (sig_match_leaf(args, jl_svec_data(ml->sig->parameters), n))
+ return ml;
+ }
}
}
ml = ml->next;
@@ -798,7 +803,9 @@ jl_typemap_entry_t *jl_typemap_entry_assoc_exact(jl_typemap_entry_t *ml, jl_valu
return NULL;
}
- while (ml != (void*)jl_nothing) {
+ for (; ml != (void*)jl_nothing; ml = ml->next) {
+ if (world < ml->min_world || world > ml->max_world)
+ continue; // ignore replaced methods
size_t lensig = jl_field_count(ml->sig);
if (lensig == n || (ml->va && lensig <= n+1)) {
if (ml->simplesig != (void*)jl_nothing) {
@@ -806,24 +813,24 @@ jl_typemap_entry_t *jl_typemap_entry_assoc_exact(jl_typemap_entry_t *ml, jl_valu
int isva = lensimplesig > 0 && jl_is_vararg_type(jl_tparam(ml->simplesig, lensimplesig - 1));
if (lensig == n || (isva && lensimplesig <= n + 1)) {
if (!sig_match_simple(args, n, jl_svec_data(ml->simplesig->parameters), isva, lensimplesig))
- goto nomatch;
+ continue;
}
else {
- goto nomatch;
+ continue;
}
}
if (ml->isleafsig) {
if (!sig_match_leaf(args, jl_svec_data(ml->sig->parameters), n))
- goto nomatch;
+ continue;
}
else if (ml->issimplesig) {
if (!sig_match_simple(args, n, jl_svec_data(ml->sig->parameters), ml->va, lensig))
- goto nomatch;
+ continue;
}
else {
if (!jl_tuple_subtype(args, n, ml->sig, 1))
- goto nomatch;
+ continue;
}
size_t i, l;
@@ -838,14 +845,14 @@ jl_typemap_entry_t *jl_typemap_entry_assoc_exact(jl_typemap_entry_t *ml, jl_valu
}
}
return ml;
- }
nomatch:
- ml = ml->next;
+ continue;
+ }
}
return NULL;
}
-jl_typemap_entry_t *jl_typemap_level_assoc_exact(jl_typemap_level_t *cache, jl_value_t **args, size_t n, int8_t offs)
+jl_typemap_entry_t *jl_typemap_level_assoc_exact(jl_typemap_level_t *cache, jl_value_t **args, size_t n, int8_t offs, size_t world)
{
if (n > offs) {
jl_value_t *a1 = args[offs];
@@ -853,21 +860,21 @@ jl_typemap_entry_t *jl_typemap_level_assoc_exact(jl_typemap_level_t *cache, jl_v
assert(jl_is_datatype(ty));
if (ty == (jl_value_t*)jl_datatype_type && cache->targ.values != (void*)jl_nothing) {
union jl_typemap_t ml_or_cache = mtcache_hash_lookup(&cache->targ, a1, 1, offs);
- jl_typemap_entry_t *ml = jl_typemap_assoc_exact(ml_or_cache, args, n, offs+1);
+ jl_typemap_entry_t *ml = jl_typemap_assoc_exact(ml_or_cache, args, n, offs+1, world);
if (ml) return ml;
}
if (cache->arg1.values != (void*)jl_nothing) {
union jl_typemap_t ml_or_cache = mtcache_hash_lookup(&cache->arg1, ty, 0, offs);
- jl_typemap_entry_t *ml = jl_typemap_assoc_exact(ml_or_cache, args, n, offs+1);
+ jl_typemap_entry_t *ml = jl_typemap_assoc_exact(ml_or_cache, args, n, offs+1, world);
if (ml) return ml;
}
}
if (cache->linear != (jl_typemap_entry_t*)jl_nothing) {
- jl_typemap_entry_t *ml = jl_typemap_entry_assoc_exact(cache->linear, args, n);
+ jl_typemap_entry_t *ml = jl_typemap_entry_assoc_exact(cache->linear, args, n, world);
if (ml) return ml;
}
if (cache->any.unknown != jl_nothing)
- return jl_typemap_assoc_exact(cache->any, args, n, offs+1);
+ return jl_typemap_assoc_exact(cache->any, args, n, offs+1, world);
return NULL;
}
@@ -1008,39 +1015,28 @@ jl_typemap_entry_t *jl_typemap_insert(union jl_typemap_t *cache, jl_value_t *par
jl_tupletype_t *simpletype, jl_svec_t *guardsigs,
jl_value_t *newvalue, int8_t offs,
const struct jl_typemap_info *tparams,
+ size_t min_world, size_t max_world,
jl_value_t **overwritten)
{
jl_ptls_t ptls = jl_get_ptls_states();
+ assert(min_world > 0 && max_world > 0);
assert(jl_is_tuple_type(type));
if (!simpletype) {
simpletype = (jl_tupletype_t*)jl_nothing;
}
if ((jl_value_t*)simpletype == jl_nothing) {
- jl_typemap_entry_t *ml = jl_typemap_assoc_by_type(*cache, type, NULL, 1, 0, offs);
+ jl_typemap_entry_t *ml = jl_typemap_assoc_by_type(*cache, type, NULL, 1, 0, offs, min_world);
if (ml && ml->simplesig == (void*)jl_nothing) {
+ if (newvalue == ml->func.value) // no change. TODO: involve world in computation!
+ return ml;
if (overwritten != NULL)
*overwritten = ml->func.value;
if (newvalue == NULL) // don't overwrite with guard entries
return ml;
- // sigatomic begin
- ml->sig = type;
- jl_gc_wb(ml, ml->sig);
- ml->simplesig = simpletype;
- jl_gc_wb(ml, ml->simplesig);
- ml->tvars = tvars;
- jl_gc_wb(ml, ml->tvars);
- ml->va = jl_is_va_tuple(type);
- // TODO: `l->func` or `l->func->roots` might need to be rooted
- ml->func.value = newvalue;
- if (newvalue)
- jl_gc_wb(ml, newvalue);
- // sigatomic end
- return ml;
+ ml->max_world = min_world - 1;
}
}
- if (overwritten != NULL)
- *overwritten = NULL;
jl_typemap_entry_t *newrec =
(jl_typemap_entry_t*)jl_gc_alloc(ptls, sizeof(jl_typemap_entry_t),
@@ -1051,6 +1047,8 @@ jl_typemap_entry_t *jl_typemap_insert(union jl_typemap_t *cache, jl_value_t *par
newrec->func.value = newvalue;
newrec->guardsigs = guardsigs;
newrec->next = (jl_typemap_entry_t*)jl_nothing;
+ newrec->min_world = min_world;
+ newrec->max_world = max_world;
// compute the complexity of this type signature
newrec->va = jl_is_va_tuple(type);
newrec->issimplesig = (tvars == jl_emptysvec); // a TypeVar environment needs an complex matching test
diff --git a/test/choosetests.jl b/test/choosetests.jl
index 46310500bd2b4..98634c0c87073 100644
--- a/test/choosetests.jl
+++ b/test/choosetests.jl
@@ -15,7 +15,8 @@ Upon return, `tests` is a vector of fully-expanded test names, and
""" ->
function choosetests(choices = [])
testnames = [
- "linalg", "subarray", "core", "inference", "keywordargs", "numbers",
+ "linalg", "subarray", "core", "inference", "worlds",
+ "keywordargs", "numbers",
"printf", "char", "strings", "triplequote", "unicode",
"dates", "dict", "hashing", "iobuffer", "staged", "offsetarray",
"arrayops", "tuple", "reduce", "reducedim", "random", "abstractarray",
diff --git a/test/compile.jl b/test/compile.jl
index 65d4669e9687a..5057190db4e33 100644
--- a/test/compile.jl
+++ b/test/compile.jl
@@ -121,8 +121,8 @@ try
let some_method = @which Base.include("string")
# global const some_method // FIXME: support for serializing a direct reference to an external Method not implemented
global const some_linfo =
- ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any),
- some_method, Tuple{typeof(Base.include), String}, Core.svec())
+ ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any, UInt),
+ some_method, Tuple{typeof(Base.include), String}, Core.svec(), typemax(UInt))
end
end
""")
@@ -146,6 +146,14 @@ try
wait(t)
let Foo = getfield(Main, Foo_module)
+ @test_throws MethodError Foo.foo(17) # world shouldn't be visible yet
+ end
+ @eval let Foo_module = $(QuoteNode(Foo_module)), # use @eval to see the results of loading the compile
+ FooBase_module = $(QuoteNode(FooBase_module)),
+ Foo = getfield(Main, Foo_module),
+ dir = $(QuoteNode(dir)),
+ cachefile = $(QuoteNode(cachefile)),
+ Foo_file = $(QuoteNode(Foo_file))
@test Foo.foo(17) == 18
@test Foo.Bar.bar(17) == 19
@@ -187,8 +195,8 @@ try
0:25)
some_method = @which Base.include("string")
some_linfo =
- ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any),
- some_method, Tuple{typeof(Base.include), String}, Core.svec())
+ ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any, UInt),
+ some_method, Tuple{typeof(Base.include), String}, Core.svec(), typemax(UInt))
@test Foo.some_linfo::Core.MethodInstance === some_linfo
PV = Foo.Value18343{Nullable}.types[1]
diff --git a/test/core.jl b/test/core.jl
index c0c076b516aa5..d0a43d40effbd 100644
--- a/test/core.jl
+++ b/test/core.jl
@@ -3367,7 +3367,7 @@ typealias PossiblyInvalidUnion{T} Union{T,Int}
# issue #13007
call13007{T,N}(::Type{Array{T,N}}) = 0
call13007(::Type{Array}) = 1
-@test length(Base._methods(call13007, Tuple{Type{TypeVar(:_,Array)}}, 4)) == 2
+@test length(Base._methods(call13007, Tuple{Type{TypeVar(:_,Array)}}, 4, typemax(UInt))) == 2
# detecting cycles during type intersection, e.g. #1631
cycle_in_solve_tvar_constraints{S}(::Type{Nullable{S}}, x::S) = 0
diff --git a/test/loading.jl b/test/loading.jl
index 01865b0379b65..7e7f707d1b1f9 100644
--- a/test/loading.jl
+++ b/test/loading.jl
@@ -6,7 +6,8 @@ using Base.Test
include("test_sourcepath.jl")
thefname = "the fname!//\\&\1*"
-@test include_string("include_string_test() = @__FILE__", thefname)() == Base.source_path()
+include_string_test_func = include_string("include_string_test() = @__FILE__", thefname)
+@test include_string_test_func() == Base.source_path()
@test include_string("Base.source_path()", thefname) == Base.source_path()
@test basename(@__FILE__) == "loading.jl"
@test isabspath(@__FILE__)
diff --git a/test/reflection.jl b/test/reflection.jl
index 1cf82befceec3..59b572d399a9f 100644
--- a/test/reflection.jl
+++ b/test/reflection.jl
@@ -339,10 +339,15 @@ end
# test jl_get_llvm_fptr. We test functions both in and definitely not in the system image
definitely_not_in_sysimg() = nothing
-for (f,t) in ((definitely_not_in_sysimg,Tuple{}),
- (Base.throw_boundserror,Tuple{UnitRange{Int64},Int64}))
- t = Base.tt_cons(Core.Typeof(f), Base.to_tuple_type(t))
- llvmf = ccall(:jl_get_llvmf, Ptr{Void}, (Any, Bool, Bool), t, false, true)
+for (f, t) in Any[(definitely_not_in_sysimg, Tuple{}),
+ (Base.:+, Tuple{Int, Int})]
+ meth = which(f, t)
+ tt = Tuple{typeof(f), t.parameters...}
+ env = (ccall(:jl_match_method, Any, (Any, Any, Any), tt, meth.sig, meth.tvars))[2]
+ world = typemax(UInt)
+ linfo = ccall(:jl_specializations_get_linfo, Ref{Core.MethodInstance}, (Any, Any, Any, UInt), meth, tt, env, world)
+ params = Base.CodegenParams()
+ llvmf = ccall(:jl_get_llvmf_decl, Ptr{Void}, (Any, UInt, Bool, Base.CodegenParams), linfo::Core.MethodInstance, world, true, params)
@test llvmf != C_NULL
@test ccall(:jl_get_llvm_fptr, Ptr{Void}, (Ptr{Void},), llvmf) != C_NULL
end
@@ -586,6 +591,7 @@ end
# PR #18888: code_typed shouldn't cache if not optimizing
let
+ world = typemax(UInt)
f18888() = return nothing
m = first(methods(f18888, Tuple{}))
@test m.specializations === nothing
@@ -593,11 +599,11 @@ let
code_typed(f18888, Tuple{}; optimize=false)
@test m.specializations !== nothing # uncached, but creates the specializations entry
- code = Core.Inference.code_for_method(m, Tuple{ft}, Core.svec(), true)
+ code = Core.Inference.code_for_method(m, Tuple{ft}, Core.svec(), world, true)
@test !isdefined(code, :inferred)
code_typed(f18888, Tuple{}; optimize=true)
- code = Core.Inference.code_for_method(m, Tuple{ft}, Core.svec(), true)
+ code = Core.Inference.code_for_method(m, Tuple{ft}, Core.svec(), world, true)
@test isdefined(code, :inferred)
end
diff --git a/test/runtests.jl b/test/runtests.jl
index 5172d85719437..7d2b65545df4a 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -69,7 +69,7 @@ cd(dirname(@__FILE__)) do
push!(results, (test, resp))
if (isa(resp[end], Integer) && (resp[end] > max_worker_rss)) || isa(resp, Exception)
if n > 1
- rmprocs(p, waitfor=0.5)
+ rmprocs(p, waitfor=5.0)
p = addprocs(1; exename=test_exename, exeflags=test_exeflags)[1]
remotecall_fetch(()->include("testdefs.jl"), p)
else
@@ -108,7 +108,7 @@ cd(dirname(@__FILE__)) do
n > 1 && print("\tFrom worker 1:\t")
local resp
try
- resp = runtests(t)
+ resp = eval(Expr(:call, () -> runtests(t))) # runtests is defined by the include above
catch e
resp = [e]
end
diff --git a/test/staged.jl b/test/staged.jl
index 3a763891f9e1c..3be262f9779f8 100644
--- a/test/staged.jl
+++ b/test/staged.jl
@@ -154,10 +154,15 @@ end
# @generated functions including inner functions
@generated function _g_f_with_inner(x)
- :(y->y)
+ return :(y -> y)
end
@test_throws ErrorException _g_f_with_inner(1)
+@generated function _g_f_with_inner2(x)
+ return y -> y
+end
+@test _g_f_with_inner2(1)(2) == 2
+
# @generated functions errors
global gf_err_ref = Ref{Int}()
diff --git a/test/worlds.jl b/test/worlds.jl
new file mode 100644
index 0000000000000..cd28b0039f429
--- /dev/null
+++ b/test/worlds.jl
@@ -0,0 +1,165 @@
+# This file is a part of Julia. License is MIT: http://julialang.org/license
+
+# tests for accurate updating of method tables
+
+tls_world_age() = ccall(:jl_get_tls_world_age, UInt, ())
+world_counter() = ccall(:jl_get_world_counter, UInt, ())
+@test typemax(UInt) > world_counter() == tls_world_age() > 0
+
+# test simple method replacement
+begin
+ g265a() = f265a(0)
+ f265a(x::Any) = 1
+ @test g265a() == 1
+ @test Base.return_types(g265a, ()) == Any[Int]
+ @test Core.Inference.return_type(g265a, ()) == Int
+
+ f265a(x::Any) = 2.0
+ @test g265a() == 2.0
+
+ @test Base.return_types(g265a, ()) == Any[Float64]
+ @test Core.Inference.return_type(g265a, ()) == Float64
+end
+
+# test signature widening
+begin
+ f265b(x::Int) = 1
+ let ty = Any[1, 2.0e0]
+ global g265b(i::Int) = f265b(ty[i])
+ end
+ @test g265b(1) == 1
+ @test Base.return_types(g265b, (Int,)) == Any[Int]
+ @test Core.Inference.return_type(g265b, (Int,)) == Int
+
+ f265b(x::Any) = 2.0
+ @test g265b(1) == 1
+ @test g265b(2) == 2.0
+ @test Base.return_types(g265b, (Int,)) == Any[Union{Int, Float64}]
+ @test Core.Inference.return_type(g265b, (Int,)) == Union{Int, Float64}
+end
+
+# test signature narrowing
+begin
+ g265c() = f265c(0)
+ f265c(x::Any) = 1
+ @test g265c() == 1
+ @test Base.return_types(g265c, ()) == Any[Int]
+ @test Core.Inference.return_type(g265c, ()) == Int
+
+ f265c(x::Int) = 2.0
+ @test g265c() == 2.0
+
+ @test Base.return_types(g265c, ()) == Any[Float64]
+ @test Core.Inference.return_type(g265c, ()) == Float64
+end
+
+# test constructor narrowing
+type A265{T}
+ field1::T
+end
+A265_() = A265(1)
+@test (A265_()::A265{Int}).field1 === 1
+A265(fld::Int) = A265(Float64(fld))
+@test (A265_()::A265{Float64}).field1 === 1.0e0
+
+# test constructor widening
+type B265{T}
+ field1::T
+ # dummy arg is present to prevent (::Type{T}){T}(arg) from matching the test calls
+ B265(field1::Any, dummy::Void) = new(field1) # prevent generation of outer ctor
+end
+ # define some constructors
+B265(x::Int, dummy::Void) = B265{Int}(x, dummy)
+let ty = Any[1, 2.0e0, 3.0f0]
+ global B265_(i::Int) = B265(ty[i], nothing)
+end
+ # test for correct answers
+@test (B265_(1)::B265{Int}).field1 === 1
+@test_throws MethodError B265_(2)
+@test_throws MethodError B265_(3)
+@test Base.return_types(B265_, (Int,)) == Any[B265{Int}]
+@test Core.Inference.return_type(B265_, (Int,)) == B265{Int}
+
+ # add new constructors
+B265(x::Float64, dummy::Void) = B265{Float64}(x, dummy)
+B265(x::Any, dummy::Void) = B265{UInt8}(x, dummy)
+
+ # make sure answers are updated
+@test (B265_(1)::B265{Int}).field1 === 1
+@test (B265_(2)::B265{Float64}).field1 === 2.0e0
+@test (B265_(3)::B265{UInt8}).field1 === 0x03
+
+@test Base.return_types(B265_, (Int,)) == Any[Union{B265{Float64}, B265{Int}, B265{UInt8}}]
+@test Core.Inference.return_type(B265_, (Int,)) == Union{B265{Float64}, B265{Int}, B265{UInt8}}
+
+
+# test oldworld call / inference
+g265() = [f265(x) for x in 1:3.]
+wc265 = world_counter()
+f265(::Any) = 1.0
+@test wc265 + 1 == world_counter()
+t265 = @async begin
+ local ret = nothing
+ while true
+ (f, args) = produce(ret)
+ ret = f(args...)
+ end
+end
+@test consume(t265) === nothing
+wc265 = world_counter()
+@test consume(t265, world_counter, ()) == wc265
+@test consume(t265, tls_world_age, ()) == wc265
+f265(::Int) = 1
+@test consume(t265, world_counter, ()) == wc265 + 1 == world_counter() == tls_world_age()
+@test consume(t265, tls_world_age, ()) == wc265
+
+@test g265() == Int[1, 1, 1]
+@test Core.Inference.return_type(f265, (Any,)) == Union{Float64, Int}
+@test Core.Inference.return_type(f265, (Int,)) == Int
+@test Core.Inference.return_type(f265, (Float64,)) == Float64
+
+@test consume(t265, g265, ()) == Float64[1.0, 1.0, 1.0]
+@test consume(t265, Core.Inference.return_type, (f265, (Any,))) == Float64
+@test consume(t265, Core.Inference.return_type, (f265, (Int,))) == Float64
+@test consume(t265, Core.Inference.return_type, (f265, (Float64,))) == Float64
+@test consume(t265, Core.Inference.return_type, (f265, (Float64,))) == Float64
+
+
+# test that reflection ignores worlds
+@test Base.return_types(f265, (Any,)) == Any[Int, Float64]
+@test consume(t265, Base.return_types, (f265, (Any,))) == Any[Int, Float64]
+
+
+# test for method errors
+h265() = true
+loc_h265 = "$(Base.source_path()):$(@__LINE__ - 1)"
+@test h265()
+@test_throws MethodError consume(t265, h265, ())
+@test_throws MethodError wait(t265)
+@test istaskdone(t265)
+let ex = t265.exception
+ @test ex.f == h265
+ @test ex.args == ()
+ @test ex.world == wc265
+ str = sprint(showerror, ex)
+ wc = world_counter()
+ cmp = """
+ MethodError: no method matching h265()
+ The applicable method may be too new: running in world age $wc265, while current world is $wc."""
+ @test startswith(str, cmp)
+ cmp = "\n h265() at $loc_h265 (method too new to be called from this world context.)"
+ @test contains(str, cmp)
+end
+
+# test for generated function correctness
+# and min/max world computation validity of cache_method
+f_gen265(x) = 1
+@generated g_gen265(x) = f_gen265(x)
+@generated h_gen265(x) = :(f_gen265(x))
+f_gen265(x::Int) = 2
+f_gen265(x::Type{Int}) = 3
+@generated g_gen265b(x) = f_gen265(x)
+@test h_gen265(0) == 2
+@test g_gen265(0) == 1
+@test f_gen265(Int) == 3
+@test g_gen265b(0) == 3
diff --git a/ui/repl.c b/ui/repl.c
index 569d294ffaaa3..bbd0cb02b6bb9 100644
--- a/ui/repl.c
+++ b/ui/repl.c
@@ -121,7 +121,10 @@ static NOINLINE int true_main(int argc, char *argv[])
if (start_client) {
JL_TRY {
+ size_t last_age = jl_get_ptls_states()->world_age;
+ jl_get_ptls_states()->world_age = jl_get_world_counter();
jl_apply(&start_client, 1);
+ jl_get_ptls_states()->world_age = last_age;
}
JL_CATCH {
jl_no_exc_handler(jl_exception_in_transit);