-
Notifications
You must be signed in to change notification settings - Fork 109
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
mlens SuperLearner for MIMO (multi-input multi-output) #134
Comments
Hi! Thanks for raising this issue - this would indeed be a good feature to have. For the time being, it isn't possible with the ready-made ensemble classes we have, but we can easily fix that with some tweaks: you need to set up a custom class.
from mlens.parallel import Learner, Transformer, Pipeline, Group, Layer, make_group
from mlens.ensemble import BaseEnsemble
from mlens.index import FoldIndex, FullIndex
from mlens.utils import check_instances
from mlens.ensemble.base import check_kwargs
# First, your new Learner. ``num_targets`` will be your multi-output dimensionality.
class MultiLearner(Learner):
    """Learner wrapper for multi-output (MIMO) estimation.

    Identical to ``mlens.parallel.Learner`` except that the prediction
    multiplier is fixed to ``num_targets``, the multi-output
    dimensionality of ``y``, so the layer reserves one prediction
    column per target.
    """

    def __init__(self, estimator, num_targets, **kwargs):
        # num_targets: number of output columns (targets) of y.
        # All remaining keyword arguments are forwarded to the base Learner.
        super(MultiLearner, self).__init__(estimator, **kwargs)
        self.num_targets = num_targets

    def _get_multiplier(self, X, y):
        # Override the base hook: the prediction-array width per estimator
        # is fixed up front to the declared number of targets
        # (presumably the base class derives it from the data — TODO confirm
        # against mlens internals).
        return self.num_targets
def make_multi_group(indexer, estimators, preprocessing,
                     learner_kwargs=None, transformer_kwargs=None, name=None):
    """Assemble a ``Group`` whose learners are ``MultiLearner`` instances.

    Mirrors ``mlens.parallel.make_group`` but builds multi-output-aware
    learners so each estimator emits one column per target.

    Parameters are as in ``make_group``: an ``indexer`` for the
    cross-validation strategy, ``estimators`` and ``preprocessing``
    specifications, optional keyword dictionaries forwarded to the
    learners/transformers, and an optional group ``name``.
    """
    # Normalize the (estimators, preprocessing) specification into the
    # canonical (case, name, instance) tuples mlens expects.
    preprocessing, estimators = check_instances(estimators, preprocessing)

    learner_kwargs = {} if learner_kwargs is None else learner_kwargs
    transformer_kwargs = {} if transformer_kwargs is None else transformer_kwargs

    # One Transformer per preprocessing case; return_y=True keeps the
    # targets flowing through the pipeline.
    transformers = []
    for case, pipe in preprocessing:
        transformers.append(
            Transformer(estimator=Pipeline(pipe, return_y=True),
                        name=case, **transformer_kwargs))

    # One MultiLearner per (case, name, estimator) triple.
    learners = []
    for case, label, est in estimators:
        learners.append(
            MultiLearner(estimator=est, preprocess=case,
                         name=label, **learner_kwargs))

    return Group(indexer=indexer, learners=learners,
                 transformers=transformers, name=name)
# Change the make_group function in the base ensemble class
class MultiBaseEnsemble(BaseEnsemble):
    """BaseEnsemble variant whose layers are built via ``make_multi_group``.

    Identical to ``mlens.ensemble.BaseEnsemble`` except that
    ``_build_layer`` constructs multi-output-aware learner groups.
    """

    def _build_layer(self, estimators, indexer, preprocessing, **kwargs):
        # backend/n_jobs may not be overridden per layer.
        check_kwargs(kwargs, ['backend', 'n_jobs'])
        # Layer-level settings default to the ensemble-wide backend settings.
        verbose = kwargs.pop('verbose', max(self._backend.verbose - 1, 0))
        dtype = kwargs.pop('dtype', self._backend.dtype)
        propagate = kwargs.pop('propagate_features', None)
        shuffle = kwargs.pop('shuffle', self.shuffle)
        random_state = kwargs.pop('random_state', self.random_state)
        rs = kwargs.pop('raise_on_exception', self.raise_on_exception)
        if random_state:
            # Derive a layer-specific integer seed from the ensemble seed.
            # NOTE(review): check_random_state is not imported in this
            # snippet — add e.g. ``from sklearn.utils import check_random_state``.
            random_state = check_random_state(random_state).randint(0, 10000)
        kwargs['verbose'] = max(verbose - 1, 0)
        kwargs['scorer'] = kwargs.pop('scorer', self.scorer)
        # Remaining kwargs are passed positionally as learner_kwargs
        # (presumably mirroring mlens's own make_group call — verify).
        group = make_multi_group(indexer, estimators, preprocessing, kwargs)
        name = "layer-%i" % (len(self._backend.stack) + 1)  # Start count at 1
        lyr = Layer(
            name=name, dtype=dtype, shuffle=shuffle,
            random_state=random_state, verbose=verbose,
            raise_on_exception=rs, propagate_features=propagate)
        lyr.push(group)
        return lyr
# Finally, build the SuperLearner (or similar)
class MultiSuperLearner(MultiBaseEnsemble):
    """SuperLearner (stacked generalization) with multi-output support.

    Same interface as ``mlens.ensemble.SuperLearner``, but ``add``
    accepts ``num_targets`` so each layer reserves one prediction
    column per target of a multi-output ``y``.
    """

    def __init__(
            self, folds=2, shuffle=False, random_state=None, scorer=None,
            raise_on_exception=True, array_check=None, verbose=False, n_jobs=-1,
            backend='threading', model_selection=False, sample_size=20, layers=None):
        super(MultiSuperLearner, self).__init__(
            shuffle=shuffle, random_state=random_state, scorer=scorer,
            raise_on_exception=raise_on_exception, verbose=verbose,
            n_jobs=n_jobs, layers=layers, backend=backend,
            array_check=array_check, model_selection=model_selection,
            sample_size=sample_size)
        self.__initialized__ = 0  # Unlock parameter setting
        self.folds = folds
        self.__initialized__ = 1  # Protect against param resets

    def add_meta(self, estimator, **kwargs):
        """Add a meta (final) layer fitted on the full training set."""
        return self.add(estimators=estimator, meta=True, **kwargs)

    def add(self, estimators, num_targets=1, preprocessing=None,
            proba=False, meta=False, propagate_features=None, **kwargs):
        """Add a stacking layer.

        ``num_targets`` is the output dimensionality of ``y``; it is
        forwarded down to ``MultiLearner`` via the base ``add``.
        """
        c = kwargs.pop('folds', self.folds)
        if meta:
            # Meta layer: fit on all data, no cross-validation folds.
            idx = FullIndex()
        else:
            idx = FoldIndex(c, raise_on_exception=self.raise_on_exception)
        return super(MultiSuperLearner, self).add(
            estimators=estimators, num_targets=num_targets, indexer=idx,
            preprocessing=preprocessing, proba=proba,
            propagate_features=propagate_features, **kwargs)

# You can now use this class as you normally would:
X, y = make_regression(n_samples=1000, n_features=10, n_informative=5,
                       n_targets=2, random_state=1, noise=0.5)
# Build a two-layer MIMO super learner and fit it on the demo data.
# (Fixed: the pasted snippet had a stray "|" after fit(), a syntax error.)
ensemble = MultiSuperLearner()
ensemble.add([LinearRegression()], num_targets=2)
ensemble.add_meta(LinearRegression(), num_targets=2)
ensemble.fit(X, y)
Please let me know if the current version of mlens can be used for MIMO. If yes, is there any example of its usage?
There is an example of mlens usage at this link https://machinelearningmastery.com/super-learner-ensemble-in-python/#comment-556808 but it does not work for MIMO problems.
Thanks in advance.
The text was updated successfully, but these errors were encountered: