NeuralMAB (#102)
* fix

* fix#2

* minor

* initial neural mab

* add context agents enum

* add experiments

* launch

* minor

* add contextual mab to pull arms

* put NN in a separate class & add docstrings

* adjust settings

* change places of mutations

* fixes after review

* minors

* minors

* add contextual bandits

* add experimenter & multiple fitness lines visualizer

* experiments

* probabilities for contexts

* minors & docstrings

* fix pep8

* fix requirements

* minor

* minor

* add average visualization

* fixes after review

* minors

* update requirements

* Update unit-build.yml

* Update unit-build.yml

* fix pep8

* Update unit-build.yml
maypink authored Jun 9, 2023
1 parent 3cc87ea commit 072f027
Showing 24 changed files with 946 additions and 49 deletions.
1 change: 1 addition & 0 deletions .github/workflows/unit-build.yml
@@ -31,6 +31,7 @@ jobs:
pip install .[docs]
pip install .[profilers]
pip install pytest-cov
pip install -r requirements_adaptive.txt
- name: Test with pytest
run: |
pytest --cov=golem test/unit
@@ -0,0 +1,20 @@
from examples.adaptive_optimizer.experiment_setup import run_adaptive_mutations_with_context
from examples.adaptive_optimizer.mab_experiment_different_targets import run_experiment_node_num, \
run_experiment_edge_num, run_experiment_graphs_ratio_edges_nodes, run_experiment_trees
from golem.core.optimisers.adaptive.operator_agent import MutationAgentTypeEnum


if __name__ == '__main__':
"""Run adaptive optimizer on different targets to see how neural multi-armed bandit agent converges
to different probabilities of actions (i.e. mutations) for different targets."""
adaptive_mutation_type = MutationAgentTypeEnum.contextual_bandit

run_experiment_node_num(trial_timeout=2, adaptive_mutation_type=adaptive_mutation_type,
run_func=run_adaptive_mutations_with_context)
run_experiment_edge_num(trial_timeout=2, adaptive_mutation_type=adaptive_mutation_type,
run_func=run_adaptive_mutations_with_context)
run_experiment_trees(trial_timeout=10, trial_iterations=2000, adaptive_mutation_type=adaptive_mutation_type,
run_func=run_adaptive_mutations_with_context)
run_experiment_graphs_ratio_edges_nodes(trial_timeout=10, trial_iterations=2000,
adaptive_mutation_type=adaptive_mutation_type,
run_func=run_adaptive_mutations_with_context)
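
Note: the definition of MutationAgentTypeEnum is not part of this diff; a rough, purely illustrative sketch covering the member names referenced across this PR (random, bandit, contextual_bandit, neural_bandit) could look like the following — the real enum lives in golem.core.optimisers.adaptive.operator_agent and may differ.

from enum import Enum


class MutationAgentTypeEnum(Enum):
    # Illustrative sketch only; member names are the ones used in these examples.
    random = 'random'
    bandit = 'bandit'
    contextual_bandit = 'contextual_bandit'
    neural_bandit = 'neural_bandit'
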
52 changes: 50 additions & 2 deletions examples/adaptive_optimizer/experiment_setup.py
@@ -1,8 +1,10 @@
from pprint import pprint
from typing import List, Sequence, Optional
from typing import List, Sequence, Optional, Dict

import networkx as nx
import numpy as np
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans

from examples.synthetic_graph_evolution.utils import draw_graphs_subplots
from examples.adaptive_optimizer.utils import plot_action_values
@@ -23,7 +25,7 @@ def run_adaptive_mutations(
stats_action_value_log: List[List[float]] = []

def log_action_values(next_pop: PopulationT, optimizer: EvoGraphOptimizer):
values = optimizer.mutation.agent.get_action_values(obs=None)
values = optimizer.mutation.agent.get_action_values(obs=next_pop[0])
stats_action_value_log.append(list(values))

# Setup the logger and run the optimizer
@@ -47,3 +49,49 @@ def log_action_values(next_pop: PopulationT, optimizer: EvoGraphOptimizer):
plot_action_values(stats_action_value_log, action_tags=agent.actions)
plt.show()
return stats_action_value_log


def run_adaptive_mutations_with_context(
optimizer: EvoGraphOptimizer,
objective: Objective,
target: Optional[nx.DiGraph] = None,
visualize: bool = True,
n_clusters: int = 2
):
"""This experiment setup outputs graphic of relative action probabilities
for given target/objective and given optimizer setup."""
stats_action_value_log: Dict[int, List[List[float]]] = dict()
cluster = KMeans(n_clusters=n_clusters)

def log_action_values_with_clusters(next_pop: PopulationT, optimizer: EvoGraphOptimizer):
obs_contexts = optimizer.mutation.agent.get_context(next_pop)
cluster.fit(np.array(obs_contexts).reshape(-1, 1))
centers = cluster.cluster_centers_
for i, center in enumerate(centers):
values = optimizer.mutation.agent.get_action_values(obs=center)
if i not in stats_action_value_log.keys():
stats_action_value_log[i] = []
stats_action_value_log[i].append(list(values))

# Setup the logger and run the optimizer
optimizer.set_iteration_callback(log_action_values_with_clusters)
found_graphs = optimizer.optimise(objective)
found_graph = found_graphs[0] if isinstance(found_graphs, Sequence) else found_graphs
history = optimizer.history
agent = optimizer.mutation.agent

print('History of action probabilities:')
pprint(stats_action_value_log)
if visualize:
found_nx_graph = BaseNetworkxAdapter().restore(found_graph)
final_metrics = objective(found_nx_graph).value
if target is not None:
draw_graphs_subplots(target, found_nx_graph,
titles=['Target Graph', f'Found Graph (fitness={final_metrics})'])
else:
draw_graphs_subplots(found_nx_graph, titles=[f'Found Graph (fitness={final_metrics})'])
history.show.fitness_line()
for i in range(n_clusters):
plot_action_values(stats_action_value_log[i], action_tags=agent.actions)
plt.show()
return stats_action_value_log
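
A minimal, self-contained sketch of the per-cluster logging idea used in run_adaptive_mutations_with_context above: contexts observed in a population are clustered with KMeans, and the agent's action values are queried at each cluster center. DummyAgent is a hypothetical stand-in for optimizer.mutation.agent; the numbers are made up.

import numpy as np
from sklearn.cluster import KMeans


class DummyAgent:
    """Stand-in for the mutation agent; returns made-up action values for a 1-D context."""
    actions = ['single_add', 'single_edge', 'single_drop']

    def get_action_values(self, obs: float):
        # Pretend small contexts favour adding nodes and large ones favour dropping them.
        return [1.0 / (1.0 + obs), 0.5, obs / (1.0 + obs)]


agent = DummyAgent()
contexts = np.array([3, 4, 5, 40, 42, 45], dtype=float)  # e.g. node counts of a population
kmeans = KMeans(n_clusters=2, n_init=10).fit(contexts.reshape(-1, 1))

stats_action_value_log = {}
for i, center in enumerate(kmeans.cluster_centers_):
    values = agent.get_action_values(obs=float(center[0]))
    stats_action_value_log.setdefault(i, []).append(values)

print(stats_action_value_log)  # one action-value trajectory per context cluster
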
examples/adaptive_optimizer/mab_experiment_different_targets.py
@@ -1,6 +1,6 @@
from datetime import timedelta
from functools import partial
from typing import Optional, Sequence
from typing import Optional, Sequence, Callable
import networkx as nx

from examples.adaptive_optimizer.experiment_setup import run_adaptive_mutations
@@ -37,23 +37,25 @@ def generate_trees(graph_sizes: Sequence[int], node_types: Sequence[str] = ('x',
return trees


def get_graph_gp_params(objective: Objective):
def get_graph_gp_params(objective: Objective, adaptive_mutation_type: MutationAgentTypeEnum, pop_size: int = None):
return GPAlgorithmParameters(
adaptive_mutation_type=MutationAgentTypeEnum.bandit,
pop_size=21,
adaptive_mutation_type=adaptive_mutation_type,
pop_size=pop_size or 21,
multi_objective=objective.is_multi_objective,
genetic_scheme_type=GeneticSchemeTypesEnum.generational,
mutation_types=[
MutationTypesEnum.single_add,
MutationTypesEnum.single_edge,
MutationTypesEnum.single_drop,
MutationTypesEnum.single_edge,
MutationTypesEnum.single_add
],
crossover_types=[CrossoverTypesEnum.none]
)


def run_experiment_node_num(target_sizes: Sequence[int] = (100, 400),
trial_timeout: int = 15):
def run_experiment_node_num(adaptive_mutation_type: MutationAgentTypeEnum,
target_sizes: Sequence[int] = (50, 400),
trial_timeout: int = 15,
run_func: Callable = run_adaptive_mutations):
for target_size in target_sizes:
# Setup simple objective that searches for required graph size (number of nodes)
objective = Objective({'graph_size': lambda graph: abs(target_size -
@@ -63,15 +65,18 @@ def run_experiment_node_num(target_sizes: Sequence[int] = (100, 400),
optimizer, _ = graph_search_setup(
objective=objective,
optimizer_cls=EvoGraphOptimizer,
algorithm_parameters=get_graph_gp_params(objective),
algorithm_parameters=get_graph_gp_params(objective=objective,
adaptive_mutation_type=adaptive_mutation_type),
timeout=timedelta(minutes=trial_timeout),
num_iterations=target_size * 3,
)
run_adaptive_mutations(optimizer, objective, visualize=True)
run_func(optimizer, objective, visualize=True)


def run_experiment_edge_num(target_sizes: Sequence[int] = (100, 400),
trial_timeout: int = 15):
def run_experiment_edge_num(adaptive_mutation_type: MutationAgentTypeEnum,
target_sizes: Sequence[int] = (100, 400),
trial_timeout: int = 15,
run_func: Callable = run_adaptive_mutations):
for target_size in target_sizes:
# Setup simple objective that searches for required graph size (number of edges)
objective = Objective({'graph_size': lambda graph: abs(target_size -
@@ -81,14 +86,18 @@ def run_experiment_edge_num(target_sizes: Sequence[int] = (100, 400),
optimizer, _ = graph_search_setup(
objective=objective,
optimizer_cls=EvoGraphOptimizer,
algorithm_parameters=get_graph_gp_params(objective),
algorithm_parameters=get_graph_gp_params(objective=objective,
adaptive_mutation_type=adaptive_mutation_type),
timeout=timedelta(minutes=trial_timeout),
num_iterations=target_size * 3,
)
run_adaptive_mutations(optimizer, objective, visualize=True)
run_func(optimizer, objective, visualize=True)


def run_experiment_graphs_ratio_edges_nodes(trial_timeout: int = 15, trial_iterations: Optional[int] = 500):
def run_experiment_graphs_ratio_edges_nodes(adaptive_mutation_type: MutationAgentTypeEnum,
trial_timeout: int = 15,
trial_iterations: Optional[int] = 500,
run_func: Callable = run_adaptive_mutations):
"""In this experiment setup we generate different graphs with different ratios of #Edges/#Nodes.
Accordingly, the probabilities of adding edges and adding nodes should differ for different targets."""

@@ -111,16 +120,20 @@ def run_experiment_graphs_ratio_edges_nodes(trial_timeout: int = 15, trial_itera
optimizer, _ = graph_search_setup(
objective=objective,
optimizer_cls=EvoGraphOptimizer,
algorithm_parameters=get_graph_gp_params(objective),
algorithm_parameters=get_graph_gp_params(objective=objective,
adaptive_mutation_type=adaptive_mutation_type),
node_types=node_types,
timeout=timedelta(minutes=trial_timeout),
num_iterations=trial_iterations,
)

run_adaptive_mutations(optimizer, objective, target, visualize=True)
run_func(optimizer, objective, target, visualize=True)


def run_experiment_trees(trial_timeout: int = 15, trial_iterations: Optional[int] = 500):
def run_experiment_trees(adaptive_mutation_type: MutationAgentTypeEnum,
trial_timeout: int = 15,
trial_iterations: Optional[int] = 500,
run_func: Callable = run_adaptive_mutations):
node_types = ['x']
for target in generate_trees(graph_sizes=[20, 30, 50], node_types=node_types):
# Setup objective that measures some graph-theoretic similarity measure
@@ -137,7 +150,7 @@ def run_experiment_trees(trial_timeout: int = 15, trial_iterations: Optional[int
MutationTypesEnum.single_drop,
],
crossover_types=[CrossoverTypesEnum.none],
adaptive_mutation_type=MutationAgentTypeEnum.bandit,
adaptive_mutation_type=adaptive_mutation_type,
)

# Build the optimizer
@@ -150,14 +163,16 @@ def run_experiment_trees(trial_timeout: int = 15, trial_iterations: Optional[int
num_iterations=trial_iterations,
)

run_adaptive_mutations(optimizer, objective, target, visualize=True)
run_func(optimizer, objective, target, visualize=True)


if __name__ == '__main__':
"""Run adaptive optimizer on different targets to see how adaptive agent converges
to different probabilities of actions (i.e. mutations) for different targets."""
adaptive_mutation_type = MutationAgentTypeEnum.bandit

run_experiment_node_num(trial_timeout=2)
run_experiment_edge_num(trial_timeout=2)
run_experiment_trees(trial_timeout=10, trial_iterations=2000)
run_experiment_graphs_ratio_edges_nodes(trial_timeout=10, trial_iterations=2000)
run_experiment_node_num(trial_timeout=2, adaptive_mutation_type=adaptive_mutation_type)
run_experiment_edge_num(trial_timeout=2, adaptive_mutation_type=adaptive_mutation_type)
run_experiment_trees(trial_timeout=10, trial_iterations=2000, adaptive_mutation_type=adaptive_mutation_type)
run_experiment_graphs_ratio_edges_nodes(trial_timeout=10, trial_iterations=2000,
adaptive_mutation_type=adaptive_mutation_type)
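
Usage sketch: with the refactored signatures above, the same experiment can be driven either by the context-free bandit with the default logging callback or by the contextual bandit with per-cluster logging (assuming the imports resolve as in this repository).

from examples.adaptive_optimizer.experiment_setup import run_adaptive_mutations_with_context
from examples.adaptive_optimizer.mab_experiment_different_targets import run_experiment_node_num
from golem.core.optimisers.adaptive.operator_agent import MutationAgentTypeEnum

# Context-free multi-armed bandit, logged with the default run_adaptive_mutations
run_experiment_node_num(trial_timeout=2, adaptive_mutation_type=MutationAgentTypeEnum.bandit)

# Contextual bandit, logged per context cluster
run_experiment_node_num(trial_timeout=2,
                        adaptive_mutation_type=MutationAgentTypeEnum.contextual_bandit,
                        run_func=run_adaptive_mutations_with_context)
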
@@ -0,0 +1,15 @@
from examples.adaptive_optimizer.mab_experiment_different_targets import run_experiment_node_num, \
run_experiment_edge_num, run_experiment_graphs_ratio_edges_nodes, run_experiment_trees
from golem.core.optimisers.adaptive.operator_agent import MutationAgentTypeEnum


if __name__ == '__main__':
"""Run adaptive optimizer on different targets to see how neural multi-armed bandit agent converges
to different probabilities of actions (i.e. mutations) for different targets."""
adaptive_mutation_type = MutationAgentTypeEnum.neural_bandit

run_experiment_node_num(trial_timeout=2, adaptive_mutation_type=adaptive_mutation_type)
run_experiment_edge_num(trial_timeout=2, adaptive_mutation_type=adaptive_mutation_type)
run_experiment_trees(trial_timeout=10, trial_iterations=2000, adaptive_mutation_type=adaptive_mutation_type)
run_experiment_graphs_ratio_edges_nodes(trial_timeout=10, trial_iterations=2000,
adaptive_mutation_type=adaptive_mutation_type)
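
For readers unfamiliar with the approach, a conceptual sketch of what a neural contextual bandit does — this is not the repository's implementation, and PyTorch is used purely for illustration: a small network maps the context to per-action reward estimates, actions are picked epsilon-greedily, and the network is updated from observed rewards.

import random

import torch
import torch.nn as nn


class TinyNeuralBandit:
    """Toy neural contextual bandit; illustrative only."""

    def __init__(self, n_actions: int, context_dim: int, eps: float = 0.1):
        self.eps = eps
        self.n_actions = n_actions
        self.net = nn.Sequential(nn.Linear(context_dim, 16), nn.ReLU(), nn.Linear(16, n_actions))
        self.opt = torch.optim.Adam(self.net.parameters(), lr=1e-2)

    def choose(self, context):
        if random.random() < self.eps:
            return random.randrange(self.n_actions)  # explore
        with torch.no_grad():
            return int(self.net(torch.tensor(context, dtype=torch.float32)).argmax())

    def update(self, context, action: int, reward: float):
        pred = self.net(torch.tensor(context, dtype=torch.float32))[action]
        loss = (pred - torch.tensor(float(reward))) ** 2
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()


bandit = TinyNeuralBandit(n_actions=3, context_dim=1)
for _ in range(200):
    ctx = [random.uniform(0.0, 1.0)]
    action = bandit.choose(ctx)
    # Toy environment: action 0 pays off for small contexts, other actions for large ones.
    reward = 1.0 if (action == 0) == (ctx[0] < 0.5) else 0.0
    bandit.update(ctx, action, reward)
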
14 changes: 10 additions & 4 deletions examples/adaptive_optimizer/utils.py
@@ -1,4 +1,4 @@
from typing import Sequence, Optional, Any
from typing import Sequence, Optional, Any, List

import numpy as np
from matplotlib import pyplot as plt
@@ -7,7 +7,8 @@

def plot_action_values(stats: Sequence[Sequence[float]],
action_tags: Optional[Sequence[Any]] = None,
size: float = 5.):
size: float = 5.,
titles: List[str] = None):
# Plot stackplot of how action expectations and probabilities changed
x = np.arange(len(stats))
y = np.array(stats).T
@@ -22,10 +23,15 @@ def plot_action_values(stats: Sequence[Sequence[float]],
ax0.grid()
ax1.stackplot(x, y_prob, labels=labels)

ax0.set_title('Action Expectation Values')
if not titles:
expectation_values_title = 'Action Expectation Values'
probabilities_title = 'Action Probabilities'
else:
expectation_values_title, probabilities_title = titles
ax0.set_title(expectation_values_title, size=10)
ax0.set_xlabel('Generation')
ax0.set_ylabel('Reward Expectation')
ax1.set_title('Action Probabilities')
ax1.set_title(probabilities_title, size=10)
ax1.set_xlabel('Generation')
ax1.set_ylabel('Probability')
ax1.set(ylim=(0, 1.0), yticks=np.linspace(0., 1., 21))
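
Usage sketch for the extended plot_action_values signature: the new titles argument takes a two-element list replacing the default subplot titles, e.g. to label per-cluster plots (the values below are made up).

from matplotlib import pyplot as plt

from examples.adaptive_optimizer.utils import plot_action_values

stats = [[0.1, 0.2, 0.7],  # one row of action values per generation (made-up numbers)
         [0.2, 0.3, 0.5],
         [0.3, 0.3, 0.4]]
plot_action_values(stats,
                   action_tags=['single_add', 'single_edge', 'single_drop'],
                   titles=['Action Expectation Values (cluster 0)', 'Action Probabilities (cluster 0)'])
plt.show()
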
22 changes: 14 additions & 8 deletions examples/synthetic_graph_evolution/graph_search.py
@@ -1,12 +1,12 @@
from datetime import timedelta
from functools import partial
from typing import Type, Optional, Sequence
from typing import Type, Optional, Sequence, List

import networkx as nx
from examples.synthetic_graph_evolution.experiment_setup import run_experiments
from examples.synthetic_graph_evolution.generators import generate_labeled_graph
from golem.core.adapter.nx_adapter import BaseNetworkxAdapter
from golem.core.dag.verification_rules import has_no_self_cycled_nodes
from golem.core.dag.graph import Graph
from golem.core.dag.verification_rules import DEFAULT_DAG_RULES
from golem.core.optimisers.adaptive.operator_agent import MutationAgentTypeEnum
from golem.core.optimisers.genetic.gp_optimizer import EvoGraphOptimizer
from golem.core.optimisers.genetic.gp_params import GPAlgorithmParameters
@@ -25,7 +25,10 @@ def graph_search_setup(target_graph: Optional[nx.DiGraph] = None,
algorithm_parameters: Optional[AlgorithmParameters] = None,
node_types: Sequence[str] = ('x',),
timeout: Optional[timedelta] = None,
num_iterations: Optional[int] = None):
num_iterations: Optional[int] = None,
initial_graph_sizes: Optional[List[int]] = None,
initial_graphs: List[Graph] = None,
pop_size: int = None):
if target_graph is not None and objective is not None:
raise ValueError('Please provide either target or objective, not both')
elif target_graph is not None:
@@ -61,7 +64,7 @@ def graph_search_setup(target_graph: Optional[nx.DiGraph] = None,
)
default_gp_params = GPAlgorithmParameters(
adaptive_mutation_type=MutationAgentTypeEnum.random,
pop_size=21,
pop_size=pop_size or 21,
multi_objective=objective.is_multi_objective,
genetic_scheme_type=GeneticSchemeTypesEnum.generational,
mutation_types=[
@@ -74,13 +77,16 @@ def graph_search_setup(target_graph: Optional[nx.DiGraph] = None,
gp_params = algorithm_parameters or default_gp_params
graph_gen_params = GraphGenerationParams(
adapter=BaseNetworkxAdapter(),
rules_for_constraint=[has_no_self_cycled_nodes],
rules_for_constraint=DEFAULT_DAG_RULES,
available_node_types=node_types,
)

# Generate simple initial population with random trees
initial_graphs = [generate_labeled_graph('gnp', 7, node_types)
for _ in range(gp_params.pop_size)]
if not initial_graphs:
if not initial_graph_sizes:
initial_graph_sizes = [7] * gp_params.pop_size
initial_graphs = [nx.random_tree(initial_graph_sizes[i], create_using=nx.DiGraph)
for i in range(gp_params.pop_size)]
# Build the optimizer
optimiser = optimizer_cls(objective, initial_graphs, requirements, graph_gen_params, gp_params)
return optimiser, objective
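
Call sketch for the new graph_search_setup knobs introduced in this diff (pop_size, initial_graph_sizes, initial_graphs); the target and the concrete values here are placeholders.

from datetime import timedelta

from examples.synthetic_graph_evolution.generators import generate_labeled_graph
from examples.synthetic_graph_evolution.graph_search import graph_search_setup
from golem.core.optimisers.genetic.gp_optimizer import EvoGraphOptimizer

target = generate_labeled_graph('gnp', 20, ('x',))  # placeholder target graph
optimizer, objective = graph_search_setup(
    target_graph=target,
    optimizer_cls=EvoGraphOptimizer,
    pop_size=30,                                          # overrides the default of 21
    initial_graph_sizes=[5 + i % 10 for i in range(30)],  # one random-tree size per initial graph
    timeout=timedelta(minutes=2),
    num_iterations=500,
)
found_graphs = optimizer.optimise(objective)
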
Empty file added experiments/__init__.py
Empty file added experiments/mab/__init__.py
(remaining changed files not shown)
