Skip to content

Commit

Permalink
Remove superfluous try-except block in _summarize
Browse files Browse the repository at this point in the history
  • Loading branch information
flennerhag committed May 18, 2017
1 parent 0a04d98 commit 9273cbd
Show file tree
Hide file tree
Showing 2 changed files with 28 additions and 33 deletions.
16 changes: 2 additions & 14 deletions mlens/model_selection/model_selection.py
Original file line number Diff line number Diff line change
Expand Up @@ -541,23 +541,11 @@ def _summarize(self, cv_res):

if best_data is None:
best_data, best_draw = draw_data, draw_num

try:
best_data['params'] = \
self.params[case_est][best_draw]
except KeyError:
best_data['params'] = \
self.params[case_est][best_draw]
best_data['params'] = self.params[case_est][best_draw]

if draw_data['test_score_mean'] > best_data['test_score_mean']:
best_data, best_draw = draw_data, draw_num

try:
best_data['params'] = \
self.params[case_est][best_draw]
except KeyError:
best_data['params'] = \
self.params[case_est][best_draw]
best_data['params'] = self.params[case_est][best_draw]

# Assign data associated with best test score to summary dict
# We invert the dictionary nesting here
Expand Down
45 changes: 26 additions & 19 deletions mlens/model_selection/tests/test_model_selection.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,13 +92,15 @@ def test_w_prep():
evl = Evaluator(mape_scorer, cv=5, shuffle=False, random_state=100)

# Preprocessing
evl.preprocess(X, y, {'pr': [Scale()], 'no': []})
with open(os.devnull, 'w') as f, redirect_stderr(f):

evl.preprocess(X, y, {'pr': [Scale()], 'no': []})

# Fitting
evl.evaluate(X, y,
estimators=[OLS()],
param_dicts={'ols': {'offset': randint(1, 10)}},
n_iter=3)
# Fitting
evl.evaluate(X, y,
estimators=[OLS()],
param_dicts={'ols': {'offset': randint(1, 10)}},
n_iter=3)

np.testing.assert_approx_equal(
evl.summary['test_score_mean'][('no', 'ols')],
Expand All @@ -114,13 +116,16 @@ def test_w_prep():

def test_w_prep_fit():
"""[Model Selection] Test run with preprocessing, single step."""
evl = Evaluator(mape_scorer, cv=5, shuffle=False, random_state=100)
evl = Evaluator(mape_scorer, cv=5, shuffle=False, random_state=100,
verbose=True)

evl.fit(X, y,
estimators=[OLS()],
param_dicts={'ols': {'offset': randint(1, 10)}},
preprocessing={'pr': [Scale()], 'no': []},
n_iter=3)
with open(os.devnull, 'w') as f, redirect_stderr(f):

evl.fit(X, y,
estimators=[OLS()],
param_dicts={'ols': {'offset': randint(1, 10)}},
preprocessing={'pr': [Scale()], 'no': []},
n_iter=3)

np.testing.assert_approx_equal(
evl.summary['test_score_mean'][('no', 'ols')],
Expand All @@ -136,18 +141,20 @@ def test_w_prep_fit():

def test_w_prep_set_params():
"""[Model Selection] Test run with preprocessing, sep param dists."""
evl = Evaluator(mape_scorer, cv=5, shuffle=False, random_state=100)
evl = Evaluator(mape_scorer, cv=5, shuffle=False, random_state=100,
verbose=True)

params = {('no', 'ols'): {'offset': randint(3, 6)},
('pr', 'ols'): {'offset': randint(1, 3)},
}

# Fitting
evl.fit(X, y,
estimators={'pr': [OLS()], 'no': [OLS()]},
param_dicts=params,
preprocessing={'pr': [Scale()], 'no': []},
n_iter=3)
with open(os.devnull, 'w') as f, redirect_stderr(f):

evl.fit(X, y,
estimators={'pr': [OLS()], 'no': [OLS()]},
param_dicts=params,
preprocessing={'pr': [Scale()], 'no': []},
n_iter=3)

np.testing.assert_approx_equal(
evl.summary['test_score_mean'][('no', 'ols')],
Expand Down

0 comments on commit 9273cbd

Please sign in to comment.