From 6fd91b287f63edc301a2ab219b7a145f5c61930d Mon Sep 17 00:00:00 2001
From: pabloferz
Date: Tue, 9 Aug 2016 21:01:50 +0200
Subject: [PATCH] Improve inferability of promote_op

---
 base/abstractarray.jl | 10 +++++-----
 base/arraymath.jl     | 24 ++++++++++++------------
 base/promotion.jl     | 23 +++++++++++------------
 3 files changed, 28 insertions(+), 29 deletions(-)
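Commentary (not part of the commit): promote_op(f, R, S) is how Base picks the element type of a result array before applying f elementwise, and the hunks below make that computation inferable by passing element types as static method parameters and by letting Core.Inference.return_type answer directly, with the leaf-type check moved into promote_op itself. A minimal usage sketch, assuming the Julia 0.5-era API; the results are illustrative:

    # Illustrative, not from the diff: the element-type query this patch optimizes.
    A = [1, 2, 3]              # eltype Int
    B = [0.5, 1.5, 2.5]        # eltype Float64
    Base.promote_op(+, eltype(A), eltype(B))   # expected to give Float64 for A + B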
diff --git a/base/abstractarray.jl b/base/abstractarray.jl
index 65303a33d7b08..036f272d5b3a5 100644
--- a/base/abstractarray.jl
+++ b/base/abstractarray.jl
@@ -1578,11 +1578,11 @@ end
# These are needed because map(eltype, As) is not inferrable
promote_eltype_op(::Any) = (@_pure_meta; Bottom)
-promote_eltype_op(op, A) = (@_pure_meta; _promote_op(op, eltype(A)))
-promote_eltype_op{T}(op, ::AbstractArray{T}) = (@_pure_meta; _promote_op(op, T))
-promote_eltype_op{T}(op, ::AbstractArray{T}, A) = (@_pure_meta; _promote_op(op, T, eltype(A)))
-promote_eltype_op{T}(op, A, ::AbstractArray{T}) = (@_pure_meta; _promote_op(op, eltype(A), T))
-promote_eltype_op{R,S}(op, ::AbstractArray{R}, ::AbstractArray{S}) = (@_pure_meta; _promote_op(op, R, S))
+promote_eltype_op(op, A) = (@_pure_meta; promote_op(op, eltype(A)))
+promote_eltype_op{T}(op, ::AbstractArray{T}) = (@_pure_meta; promote_op(op, T))
+promote_eltype_op{T}(op, ::AbstractArray{T}, A) = (@_pure_meta; promote_op(op, T, eltype(A)))
+promote_eltype_op{T}(op, A, ::AbstractArray{T}) = (@_pure_meta; promote_op(op, eltype(A), T))
+promote_eltype_op{R,S}(op, ::AbstractArray{R}, ::AbstractArray{S}) = (@_pure_meta; promote_op(op, R, S))
promote_eltype_op(op, A, B, C, D...) = (@_pure_meta; promote_eltype_op(op, promote_eltype_op(op, A, B), C, D...))
## 1 argument
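The hunk above switches promote_eltype_op from the internal _promote_op to the public promote_op, so the widening of non-leaf types happens in one place, and dispatches on AbstractArray{T} so the element types are static parameters. A hedged sketch of how broadcast-style callers use it (names are from Base, the results are illustrative):

    # With typed arrays the element types are known statically, so the computed
    # destination type is a constant for the caller.
    A = [1, 2, 3]; B = [0.5, 1.5, 2.5]
    T = Base.promote_eltype_op(+, A, B)   # expected: Float64
    dest = similar(A, T)                  # allocate the destination before filling it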
diff --git a/base/arraymath.jl b/base/arraymath.jl
index 0de1bd39486f5..171d301fb48e9 100644
--- a/base/arraymath.jl
+++ b/base/arraymath.jl
@@ -46,8 +46,8 @@ promote_array_type{S<:Integer}(::typeof(.\), ::Type{S}, ::Type{Bool}, T::Type) =
promote_array_type{S<:Integer}(F, ::Type{S}, ::Type{Bool}, T::Type) = T
for f in (:+, :-, :div, :mod, :&, :|, :$)
- @eval ($f)(A::AbstractArray, B::AbstractArray) =
- _elementwise($f, promote_op($f, eltype(A), eltype(B)), A, B)
+ @eval ($f){R,S}(A::AbstractArray{R}, B::AbstractArray{S}) =
+ _elementwise($f, promote_op($f, R, S), A, B)
end
function _elementwise(op, ::Type{Any}, A::AbstractArray, B::AbstractArray)
promote_shape(A, B) # check size compatibility
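In the hunk above the operators now dispatch on AbstractArray{R} and AbstractArray{S}, so promote_op(f, R, S) is computed from static parameters rather than from eltype calls on untyped arguments; when the result is Any, _elementwise falls back to a comprehension. A standalone sketch of the same pattern, using an illustrative name that is not a Base internal:

    # With R and S as static parameters the result type below is a per-method
    # constant that inference can fold.
    add_result_type{R,S}(::AbstractArray{R}, ::AbstractArray{S}) = Base.promote_op(+, R, S)

    add_result_type([1, 2, 3], [1.0, 2.0, 3.0])   # expected: Float64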
@@ -63,21 +63,21 @@ end
for f in (:.+, :.-, :.*, :./, :.\, :.^, :.÷, :.%, :.<<, :.>>, :div, :mod, :rem, :&, :|, :$)
@eval begin
- function ($f)(A::Number, B::AbstractArray)
- P = promote_op($f, typeof(A), eltype(B))
- T = promote_array_type($f, typeof(A), eltype(B), P)
- T === Any && return [($f)(A, b) for b in B]
- F = similar(B, T)
+ function ($f){T}(A::Number, B::AbstractArray{T})
+ R = promote_op($f, typeof(A), T)
+ S = promote_array_type($f, typeof(A), T, R)
+ S === Any && return [($f)(A, b) for b in B]
+ F = similar(B, S)
for (iF, iB) in zip(eachindex(F), eachindex(B))
@inbounds F[iF] = ($f)(A, B[iB])
end
return F
end
- function ($f)(A::AbstractArray, B::Number)
- P = promote_op($f, eltype(A), typeof(B))
- T = promote_array_type($f, typeof(B), eltype(A), P)
- T === Any && return [($f)(a, B) for a in A]
- F = similar(A, T)
+ function ($f){T}(A::AbstractArray{T}, B::Number)
+ R = promote_op($f, T, typeof(B))
+ S = promote_array_type($f, typeof(B), T, R)
+ S === Any && return [($f)(a, B) for a in A]
+ F = similar(A, S)
for (iF, iA) in zip(eachindex(F), eachindex(A))
@inbounds F[iF] = ($f)(A[iA], B)
end
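The scalar-array methods above follow the same idea: the array's element type becomes the static parameter T, the inferred type R from promote_op is filtered through promote_array_type (which, for instance, keeps a float array's element type when the scalar is an integer), and an Any result falls back to a comprehension. Hedged expectations, assuming the Julia 0.5-era promotion rules:

    # Results are illustrative; they depend on promote_op and promote_array_type.
    2.5 .* [1, 2, 3]        # Float64 scalar, Int array   -> expected Vector{Float64}
    2 .* [1.0, 2.0, 3.0]    # Int scalar, Float64 array   -> expected to stay Vector{Float64}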
diff --git a/base/promotion.jl b/base/promotion.jl
index 449dae5add81a..4952126dc8cd8 100644
--- a/base/promotion.jl
+++ b/base/promotion.jl
@@ -220,19 +220,17 @@ minmax(x::Real, y::Real) = minmax(promote(x, y)...)
# "Promotion" that takes a function into account. These are meant to be
# used mainly by broadcast methods, so it is advised against overriding them
if isdefined(Core, :Inference)
- function _promote_op(op, T::Type)
+ function _promote_op(op, T::ANY)
G = Tuple{Generator{Tuple{T},typeof(op)}}
- R = Core.Inference.return_type(first, G)
- return isleaftype(R) ? R : Any
+ return Core.Inference.return_type(first, G)
end
- function _promote_op(op, R::Type, S::Type)
+ function _promote_op(op, R::ANY, S::ANY)
F = typeof(a -> op(a...))
G = Tuple{Generator{Zip2{Tuple{R},Tuple{S}},F}}
- T = Core.Inference.return_type(first, G)
- return isleaftype(T) ? T : Any
+ return Core.Inference.return_type(first, G)
end
else
- _promote_op(::Any...) = (@_pure_meta; Any)
+ _promote_op(::ANY...) = (@_pure_meta; Any)
end
_default_type(T::Type) = (@_pure_meta; T)
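The hunk above drops the isleaftype filtering from _promote_op (it moves into promote_op in the next hunk) and takes the arguments as ::ANY so the helper is not specialized for every type. The Generator construction asks inference for the type of first on a mapped one-element tuple, which amounts to the inferred return type of op for those argument types. A hedged sketch of that query, mirroring the one-argument case:

    # Illustrative: roughly what _promote_op(+, Int) computes on 0.5-era Julia.
    op = +
    G  = Tuple{Base.Generator{Tuple{Int}, typeof(op)}}
    Core.Inference.return_type(first, G)   # expected: Int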
@@ -240,14 +238,15 @@ promote_op(::Any...) = (@_pure_meta; Any)
promote_op(T::Type, ::Any) = (@_pure_meta; T)
promote_op(T::Type, ::Type) = (@_pure_meta; T) # To handle ambiguities
# Promotion that tries to preserve non-concrete types
-function promote_op(f, S::Type)
+function promote_op{S}(f, ::Type{S})
T = _promote_op(f, _default_type(S))
- return isleaftype(S) ? T : typejoin(S, T)
+ isleaftype(S) && return isleaftype(T) ? T : Any
+ return typejoin(S, T)
end
-function promote_op(f, R::Type, S::Type)
+function promote_op{R,S}(f, ::Type{R}, ::Type{S})
T = _promote_op(f, _default_type(R), _default_type(S))
- isleaftype(R) && return isleaftype(S) ? T : typejoin(S, T)
- return isleaftype(S) ? typejoin(R, T) : typejoin(R, S, T)
+ isleaftype(R) && isleaftype(S) && return isleaftype(T) ? T : Any
+ return typejoin(R, S, T)
end
## catch-alls to prevent infinite recursion when definitions are missing ##
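With the rewrite above, promote_op returns the inferred type as-is only when every input type is a leaf type: leaf inputs with a non-leaf inferred result give Any, while any non-leaf input is joined with the inferred type via typejoin. Hedged expectations under these rules (exact outputs depend on inference in this Julia version):

    Base.promote_op(+, Int, Int)        # leaf inputs, leaf result -> expected Int
    Base.promote_op(+, Int, Float64)    # leaf inputs, leaf result -> expected Float64
    Base.promote_op(+, Integer, Int)    # non-leaf input           -> widened via typejoin(Integer, Int, T)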