Commit

merge
jsfreischuetz committed Jun 3, 2024
2 parents 08575af + 0b01029 commit 3904020
Showing 5 changed files with 72 additions and 33 deletions.
4 changes: 1 addition & 3 deletions .vscode/settings.json
@@ -17,7 +17,6 @@
// See Also:
// - https://github.com/microsoft/vscode/issues/2809#issuecomment-1544387883
// - mlos_bench/config/schemas/README.md

{
"fileMatch": [
"mlos_bench/mlos_bench/tests/config/schemas/environments/test-cases/**/*.jsonc",
@@ -136,8 +135,7 @@
// See Also .vscode/launch.json for environment variable args to pytest during debug sessions.
// For the rest, see setup.cfg
"python.testing.pytestArgs": [
"--log-level=DEBUG",
"."
],
"python.testing.unittestEnabled": false
}
}
9 changes: 8 additions & 1 deletion mlos_core/mlos_core/optimizers/bayesian_optimizers/smac_optimizer.py
@@ -42,7 +42,8 @@ def __init__(
self, # pylint: disable=too-many-locals
*, # pylint: disable=too-many-locals
parameter_space: ConfigSpace.ConfigurationSpace,
optimization_targets: str | List[str] | None = None,
optimization_targets: Union[str, List[str], None] = None,
objective_weights: Optional[List[float]] = None,
space_adapter: Optional[BaseSpaceAdapter] = None,
seed: Optional[int] = 0,
run_name: Optional[str] = None,
@@ -68,6 +69,9 @@ def __init__(
optimization_targets : List[str]
The names of the optimization targets to minimize.
objective_weights : Optional[List[float]]
Optional list of weights of optimization targets.
space_adapter : BaseSpaceAdapter
The space adapter class to employ for parameter space transformations.
@@ -125,6 +129,7 @@ def __init__(
super().__init__(
parameter_space=parameter_space,
optimization_targets=optimization_targets,
objective_weights=objective_weights,
space_adapter=space_adapter,
)

@@ -228,6 +233,8 @@ def __init__(
intensifier=intensifier_instance,
random_design=random_design,
config_selector=config_selector,
multi_objective_algorithm=Optimizer_Smac.get_multi_objective_algorithm(
scenario, objective_weights=self._objective_weights),
overwrite=True,
logging_level=False, # Use the existing logger
**SmacOptimizer._filter_kwargs(facade, **kwargs),
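
For context, a minimal sketch of what the new multi_objective_algorithm wiring does, assuming SMAC3's v2 facade API (the scenario settings and objective names here are illustrative, not from this commit):

import ConfigSpace as CS
from smac import HyperparameterOptimizationFacade, Scenario

space = CS.ConfigurationSpace(seed=0)
space.add_hyperparameter(CS.UniformFloatHyperparameter(name='x', lower=0.0, upper=5.0))
scenario = Scenario(space, objectives=['main_score', 'other_score'], n_trials=10)
# Returns SMAC's mean-aggregation strategy, which collapses the objective
# vector into a single weighted cost during optimization.
mo_algorithm = HyperparameterOptimizationFacade.get_multi_objective_algorithm(
    scenario, objective_weights=[2, 1])
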
28 changes: 16 additions & 12 deletions mlos_core/mlos_core/optimizers/flaml_optimizer.py
@@ -30,9 +30,13 @@ class FlamlOptimizer(BaseOptimizer):
Wrapper class for FLAML Optimizer: A fast library for AutoML and tuning.
"""

# The name of an internal objective attribute that is calculated as a weighted average of the user-provided objective metrics.
_METRIC_NAME = "FLAML_score"

def __init__(self, *, # pylint: disable=too-many-arguments
parameter_space: ConfigSpace.ConfigurationSpace,
optimization_targets: List[str],
objective_weights: Optional[List[float]] = None,
space_adapter: Optional[BaseSpaceAdapter] = None,
low_cost_partial_config: Optional[dict] = None,
seed: Optional[int] = None):
@@ -46,7 +50,9 @@ def __init__(self, *, # pylint: disable=too-many-arguments
optimization_targets : List[str]
The names of the optimization targets to minimize.
For FLAML it must be a list with a single element, e.g., `["score"]`.
objective_weights : Optional[List[float]]
Optional list of weights of optimization targets.
space_adapter : BaseSpaceAdapter
The space adapter class to employ for parameter space transformations.
@@ -61,13 +67,10 @@ def __init__(self, *, # pylint: disable=too-many-arguments
super().__init__(
parameter_space=parameter_space,
optimization_targets=optimization_targets,
objective_weights=objective_weights,
space_adapter=space_adapter,
)

if len(self._optimization_targets) != 1:
raise ValueError("FLAML does not support multi-target optimization")
self._flaml_optimization_target = self._optimization_targets[0]

# Per upstream documentation, it is recommended to set the seed for
# flaml at the start of its operation globally.
if seed is not None:
@@ -99,14 +102,15 @@ def _register(self, configurations: pd.DataFrame, scores: pd.DataFrame,
"""
if context is not None:
warn(f"Not Implemented: Ignoring context {list(context.columns)}", UserWarning)
for (_, config), score in zip(configurations.astype('O').iterrows(),
scores[self._flaml_optimization_target]):
for (_, config), (_, score) in zip(configurations.astype('O').iterrows(), scores.iterrows()):
cs_config: ConfigSpace.Configuration = ConfigSpace.Configuration(
self.optimizer_parameter_space, values=config.to_dict())
if cs_config in self.evaluated_samples:
warn(f"Configuration {config} was already registered", UserWarning)

self.evaluated_samples[cs_config] = EvaluatedSample(config=config.to_dict(), score=score)
self.evaluated_samples[cs_config] = EvaluatedSample(
config=config.to_dict(),
score=float(np.average(score.astype(float), weights=self._objective_weights)),
)

def _suggest(
self, context: Optional[pd.DataFrame] = None
@@ -149,11 +153,11 @@ def _target_function(self, config: dict) -> Union[dict, None]:
Returns
-------
result: Union[dict, None]
Dictionary with a single key, `score`, if config already evaluated; `None` otherwise.
Dictionary with a single key, `FLAML_score`, if config already evaluated; `None` otherwise.
"""
cs_config = normalize_config(self.optimizer_parameter_space, config)
if cs_config in self.evaluated_samples:
return {self._flaml_optimization_target: self.evaluated_samples[cs_config].score}
return {self._METRIC_NAME: self.evaluated_samples[cs_config].score}

self._suggested_config = dict(cs_config) # Cleaned-up version of the config
return None # Returning None stops the process
@@ -196,7 +200,7 @@ def _get_next_config(self) -> dict:
self._target_function,
config=self.flaml_parameter_space,
mode='min',
metric=self._flaml_optimization_target,
metric=self._METRIC_NAME,
points_to_evaluate=points_to_evaluate,
evaluated_rewards=evaluated_rewards,
num_samples=len(points_to_evaluate) + 1,
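
For reference, the scalarization FLAML now receives is just numpy's weighted mean; a small worked example (the score values are illustrative):

import numpy as np
import pandas as pd

# np.average computes sum(w_i * s_i) / sum(w_i); with weights=None it is the
# plain mean, matching the objective_weights=None default.
score = pd.Series({'main_score': 10.0, 'other_score': 4.0})
flaml_score = float(np.average(score.astype(float), weights=[2, 1]))
assert flaml_score == (2 * 10.0 + 1 * 4.0) / 3  # == 8.0
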
9 changes: 8 additions & 1 deletion mlos_core/mlos_core/optimizers/optimizer.py
@@ -26,7 +26,8 @@ class BaseOptimizer(metaclass=ABCMeta):

def __init__(self, *,
parameter_space: ConfigSpace.ConfigurationSpace,
optimization_targets: str | List[str] | None = None,
optimization_targets: Union[str, List[str], None] = None,
objective_weights: Optional[List[float]] = None,
space_adapter: Optional[BaseSpaceAdapter] = None):
"""
Create a new instance of the base optimizer.
@@ -37,6 +38,8 @@ def __init__(self, *,
The parameter space to optimize.
optimization_targets : List[str]
The names of the optimization targets to minimize.
objective_weights : Optional[List[float]]
Optional list of weights of optimization targets.
space_adapter : BaseSpaceAdapter
The space adapter class to employ for parameter space transformations.
"""
@@ -48,6 +51,10 @@ def __init__(self, *,
raise ValueError("Given parameter space differs from the one given to space adapter")

self._optimization_targets = optimization_targets
self._objective_weights = objective_weights
if objective_weights is not None and len(objective_weights) != len(optimization_targets):
raise ValueError("Number of weights must match the number of optimization targets")

self._space_adapter: Optional[BaseSpaceAdapter] = space_adapter
self._observations: List[Tuple[pd.DataFrame, pd.DataFrame, Optional[pd.DataFrame]]] = []
self._has_context: Optional[bool] = None
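
A minimal usage sketch of the new parameter, assuming the OptimizerFactory API used elsewhere in this repo; passing objective_weights through optimizer_kwargs is an assumption, since it is only an __init__ keyword in this diff:

import ConfigSpace as CS
from mlos_core.optimizers import OptimizerFactory, OptimizerType

space = CS.ConfigurationSpace(seed=42)
space.add_hyperparameter(CS.UniformFloatHyperparameter(name='x', lower=0.0, upper=5.0))
optimizer = OptimizerFactory.create(
    parameter_space=space,
    optimization_targets=['main_score', 'other_score'],
    optimizer_type=OptimizerType.FLAML,
    optimizer_kwargs={'objective_weights': [2, 1]},  # forwarded to __init__
)
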
55 changes: 39 additions & 16 deletions mlos_core/mlos_core/tests/optimizers/optimizer_multiobj_test.py
@@ -7,22 +7,49 @@
"""

import logging
from typing import List, Optional, Type

import pytest

import pandas as pd
import numpy as np
import ConfigSpace as CS

from mlos_core.optimizers import OptimizerType, OptimizerFactory
from mlos_core.optimizers import OptimizerType, BaseOptimizer

from mlos_core.tests import SEED


_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.DEBUG)


def test_multi_target_opt() -> None:
@pytest.mark.parametrize(('optimizer_class', 'kwargs'), [
*[(member.value, {}) for member in OptimizerType],
])
def test_multi_target_opt_wrong_weights(optimizer_class: Type[BaseOptimizer], kwargs: dict) -> None:
"""
Make sure that the optimizer raises an error if the number of objective weights
does not match the number of optimization targets.
"""
with pytest.raises(ValueError):
optimizer_class(
parameter_space=CS.ConfigurationSpace(seed=SEED),
optimization_targets=['main_score', 'other_score'],
objective_weights=[1],
**kwargs
)


@pytest.mark.parametrize(('objective_weights'), [
[2, 1],
[0.5, 0.5],
None,
])
@pytest.mark.parametrize(('optimizer_class', 'kwargs'), [
*[(member.value, {}) for member in OptimizerType],
])
def test_multi_target_opt(objective_weights: Optional[List[float]],
optimizer_class: Type[BaseOptimizer],
kwargs: dict) -> None:
"""
Toy multi-target optimization problem to test the optimizers with
mixed numeric types to ensure that original dtypes are retained.
@@ -32,7 +59,7 @@ def test_multi_target_opt() -> None:
def objective(point: pd.DataFrame) -> pd.DataFrame:
# mix of hyperparameters, optimal is to select the highest possible
return pd.DataFrame({
"score": point.x + point.y,
"main_score": point.x + point.y,
"other_score": point.x ** 2 + point.y ** 2,
})

@@ -43,15 +70,11 @@ def objective(point: pd.DataFrame) -> pd.DataFrame:
input_space.add_hyperparameter(
CS.UniformFloatHyperparameter(name='y', lower=0.0, upper=5.0))

optimizer = OptimizerFactory.create(
optimizer = optimizer_class(
parameter_space=input_space,
optimization_targets=['score', 'other_score'],
optimizer_type=OptimizerType.SMAC,
optimizer_kwargs={
# Test with default config.
'use_default_config': True,
# 'n_random_init': 10,
},
optimization_targets=['main_score', 'other_score'],
objective_weights=objective_weights,
**kwargs,
)

with pytest.raises(ValueError, match="No observations"):
@@ -75,21 +98,21 @@ def objective(point: pd.DataFrame) -> pd.DataFrame:
# Test registering the suggested configuration with a score.
observation = objective(suggestion)
assert isinstance(observation, pd.DataFrame)
assert set(observation.columns) == {'score', 'other_score'}
assert set(observation.columns) == {'main_score', 'other_score'}
optimizer.register(suggestion, observation, context)

(best_config, best_score, best_context) = optimizer.get_best_observations()
assert isinstance(best_config, pd.DataFrame)
assert isinstance(best_score, pd.DataFrame)
assert set(best_config.columns) == {'x', 'y'}
assert set(best_score.columns) == {'score', 'other_score'}
assert set(best_score.columns) == {'main_score', 'other_score'}
assert best_config.shape == (1, 2)
assert best_score.shape == (1, 2)

(all_configs, all_scores, all_contexts) = optimizer.get_observations()
assert isinstance(all_configs, pd.DataFrame)
assert isinstance(all_scores, pd.DataFrame)
assert set(all_configs.columns) == {'x', 'y'}
assert set(all_scores.columns) == {'score', 'other_score'}
assert set(all_scores.columns) == {'main_score', 'other_score'}
assert all_configs.shape == (max_iterations, 2)
assert all_scores.shape == (max_iterations, 2)
