Skip to content

Commit

Permalink
Revert "experimental commit: adding serialization lock in run_evolve"
Browse files Browse the repository at this point in the history
This reverts commit 54de980.
mlooz committed May 6, 2020
1 parent b07b2ca commit 925de00
Showing 2 changed files with 63 additions and 69 deletions.
128 changes: 62 additions & 66 deletions pygmo/_py_algorithms.py
Original file line number Diff line number Diff line change
@@ -6,12 +6,11 @@
# Public License v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

import numpy

import typing

from . import s_policy, select_best


class scipy_optimize:
"""
This class is a user defined algorithm (UDA) providing a wrapper around the function scipy.optimize.minimize.
@@ -53,6 +52,18 @@ class scipy_optimize:
54
"""

def _maybe_jit(func):
"""
This function tries to import the just-in-time compiler from numba and apply it to the passed function.
If the import fails, the argument is returned unchanged.
"""
try:
from numba import jit

return jit(nopython=True)(func)
except ModuleNotFoundError:
return func

class _fitness_wrapper:
"""
A helper class to prevent redundant evaluations of the fitness function.
@@ -121,40 +132,6 @@ def gradient_func(x, *args, **kwargs):

return gradient_func

def _maybe_jit(func):
"""
This function tries to import the just-in-time compiler from numba and apply it to the passed function.
If the import fails, the argument is returned unchanged.
"""
try:
from numba import jit

return jit(nopython=True)(func)
except ModuleNotFoundError:
return func

@staticmethod
@_maybe_jit
def _unpack_sparse_gradient(
    sparse_values: typing.Mapping[int, float],
    idx: int,
    shape: typing.Tuple[int],
    sparsity_pattern,
    invert_sign: bool = False,
):
    """
    Scatter sparse gradient entries for output dimension ``idx`` into a
    dense numpy array of the given ``shape``.

    ``sparsity_pattern[i]`` is an ``(output, input)`` index pair matching
    ``sparse_values[i]``; entries whose output index differs from ``idx``
    are skipped.  ``invert_sign`` negates every copied value.
    """
    sign = -1 if invert_sign else 1
    result = numpy.zeros(shape)
    for pos in range(len(sparse_values)):
        # keep only the entries that belong to the requested dimension
        if sparsity_pattern[pos][0] == idx:
            result[sparsity_pattern[pos][1]] = sign * sparse_values[pos]
    return result

def _generate_gradient_sparsity_wrapper(self, idx: int):
"""
A function to extract a sparse gradient from a pygmo problem into the dense gradient expected by scipy.
@@ -194,6 +171,27 @@ def _generate_gradient_sparsity_wrapper(self, idx: int):
+ str(self.problem.get_nf())
)

@scipy_optimize._maybe_jit
def _unpack_sparse_gradient(
    sparse_values: typing.Mapping[int, float],
    idx: int,
    shape: typing.Tuple[int],
    sparsity_pattern,
    invert_sign: bool = False,
) -> numpy.ndarray:
    """
    Densify the sparse gradient entries belonging to output dimension
    ``idx`` into a zero-initialized array of the given ``shape``.

    ``sparsity_pattern`` pairs each value with an ``(output, input)``
    index; ``invert_sign`` flips the sign of every written value.
    """
    sign = -1 if invert_sign else 1
    dense = numpy.zeros(shape)
    nnz = len(sparse_values)
    for k in range(nnz):
        row, col = sparsity_pattern[k][0], sparsity_pattern[k][1]
        # only the requested output dimension contributes
        if row == idx:
            dense[col] = sign * sparse_values[k]
    return dense

def wrapper(*args, **kwargs) -> numpy.ndarray:
"""
Calls the gradient callable and returns dense representation along a fixed dimension
@@ -222,40 +220,12 @@ def wrapper(*args, **kwargs) -> numpy.ndarray:
+ " non-zeros, but sparsity pattern has "
+ str(len(sparsity_pattern))
)
return scipy_optimize._fitness_wrapper._unpack_sparse_gradient(
return _unpack_sparse_gradient(
sparse_values, idx, dim, sparsity_pattern, invert_sign
)

return wrapper

@staticmethod
@_maybe_jit
def _unpack_sparse_hessian(
    sparse_values: typing.Mapping[int, float],
    idx: int,
    shape: typing.Tuple[int, int],
    sparsity_pattern,
    invert_sign: bool = False,
):
    """
    Expand sparse hessian entries into a dense, symmetric numpy matrix
    of the given ``shape``.

    Each value lands at its ``(row, col)`` position from
    ``sparsity_pattern`` and is mirrored to ``(col, row)``.
    ``invert_sign`` negates every entry.  ``idx`` is not used in the
    body; it is kept for interface compatibility with the caller.
    """
    sign = -1 if invert_sign else 1
    result = numpy.zeros(shape)
    for k in range(len(sparse_values)):
        row = sparsity_pattern[k][0]
        col = sparsity_pattern[k][1]
        value = sign * sparse_values[k]
        result[row][col] = value
        # Symmetrize unconditionally: writing the diagonal twice is
        # cheaper than branching on row == col inside the loop.
        result[col][row] = value
    return result

def _generate_hessian_sparsity_wrapper(self, idx: int):
"""
A function to extract a sparse hessian from a pygmo problem into the dense hessian expected by scipy.
@@ -296,6 +266,32 @@ def _generate_hessian_sparsity_wrapper(self, idx: int):
)
shape: typing.Tuple[int, int] = (dim, dim)

@scipy_optimize._maybe_jit
def _unpack_sparse_hessian(
    sparse_values: typing.Mapping[int, float],
    idx: int,
    shape: typing.Tuple[int, int],
    sparsity_pattern,
    invert_sign: bool = False,
) -> numpy.ndarray:
    """
    Build a dense, symmetric hessian matrix of the given ``shape`` from
    sparse values and their ``(row, col)`` index pattern.

    ``invert_sign`` negates every entry; ``idx`` is unused in the body
    and retained for interface compatibility.
    """
    sign = -1 if invert_sign else 1
    dense = numpy.zeros(shape)
    nnz = len(sparse_values)
    for k in range(nnz):
        row, col = sparsity_pattern[k][0], sparsity_pattern[k][1]
        entry = sign * sparse_values[k]
        dense[row][col] = entry
        # Mirror across the diagonal without a redundancy check —
        # branching inside the loop would cost more than the extra write.
        dense[col][row] = entry
    return dense

def wrapper(*args, **kwargs) -> numpy.ndarray:
"""
Calls the hessian callable and returns dense representation along a fixed dimension
@@ -325,7 +321,7 @@ def wrapper(*args, **kwargs) -> numpy.ndarray:
+ str(len(sparsity_pattern))
)

return scipy_optimize._fitness_wrapper._unpack_sparse_hessian(
return _unpack_sparse_hessian(
sparse_values, idx, shape, sparsity_pattern, invert_sign
)

4 changes: 1 addition & 3 deletions pygmo/_py_islands.py
Original file line number Diff line number Diff line change
@@ -99,7 +99,6 @@ class mp_island(object):
"""

# Static variables for the pool.
_pickle_lock = _Lock()
_pool_lock = _Lock()
_pool = None
_pool_size = None
@@ -201,8 +200,7 @@ def run_evolve(self, algo, pop):
# than failing in the bootstrap phase of the remote process, which
# can lead to hangups.
import pickle
with self._pickle_lock:
ser_algo_pop = pickle.dumps((algo, pop))
ser_algo_pop = pickle.dumps((algo, pop))

if self._use_pool:
with mp_island._pool_lock:

0 comments on commit 925de00

Please sign in to comment.