
Commit

[MRG+1] add scorer based on explained_variance_score (scikit-learn#9259)
qinhanmin2014 authored and lesteve committed Aug 8, 2017
1 parent 6d4ae1b commit 9f91ec7
Showing 4 changed files with 14 additions and 7 deletions.
doc/modules/model_evaluation.rst (2 additions, 1 deletion)
@@ -81,6 +81,7 @@ Scoring Function
'v_measure_score' :func:`metrics.v_measure_score`

**Regression**
+ 'explained_variance' :func:`metrics.explained_variance_score`
'neg_mean_absolute_error' :func:`metrics.mean_absolute_error`
'neg_mean_squared_error' :func:`metrics.mean_squared_error`
'neg_mean_squared_log_error' :func:`metrics.mean_squared_log_error`
@@ -101,7 +102,7 @@ Usage examples:
>>> model = svm.SVC()
>>> cross_val_score(model, X, y, scoring='wrong_choice')
Traceback (most recent call last):
- ValueError: 'wrong_choice' is not a valid scoring value. Valid options are ['accuracy', 'adjusted_mutual_info_score', 'adjusted_rand_score', 'average_precision', 'completeness_score', 'f1', 'f1_macro', 'f1_micro', 'f1_samples', 'f1_weighted', 'fowlkes_mallows_score', 'homogeneity_score', 'mutual_info_score', 'neg_log_loss', 'neg_mean_absolute_error', 'neg_mean_squared_error', 'neg_mean_squared_log_error', 'neg_median_absolute_error', 'normalized_mutual_info_score', 'precision', 'precision_macro', 'precision_micro', 'precision_samples', 'precision_weighted', 'r2', 'recall', 'recall_macro', 'recall_micro', 'recall_samples', 'recall_weighted', 'roc_auc', 'v_measure_score']
+ ValueError: 'wrong_choice' is not a valid scoring value. Valid options are ['accuracy', 'adjusted_mutual_info_score', 'adjusted_rand_score', 'average_precision', 'completeness_score', 'explained_variance', 'f1', 'f1_macro', 'f1_micro', 'f1_samples', 'f1_weighted', 'fowlkes_mallows_score', 'homogeneity_score', 'mutual_info_score', 'neg_log_loss', 'neg_mean_absolute_error', 'neg_mean_squared_error', 'neg_mean_squared_log_error', 'neg_median_absolute_error', 'normalized_mutual_info_score', 'precision', 'precision_macro', 'precision_micro', 'precision_samples', 'precision_weighted', 'r2', 'recall', 'recall_macro', 'recall_micro', 'recall_samples', 'recall_weighted', 'roc_auc', 'v_measure_score']

.. note::

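For context only (not part of the commit), a minimal sketch of how the newly documented scoring string could be used, assuming a small synthetic regression problem built with make_regression and a Ridge estimator:

from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score

X, y = make_regression(n_samples=100, n_features=4, random_state=0)
# 'explained_variance' is now accepted anywhere a scoring string is allowed,
# e.g. cross_val_score, GridSearchCV, or validation_curve.
scores = cross_val_score(Ridge(), X, y, scoring='explained_variance', cv=3)
print(scores)  # one explained-variance value per fold; the best possible score is 1.0
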
doc/whats_new.rst (3 additions, 0 deletions)
@@ -139,6 +139,9 @@ Model selection and evaluation
:class:`model_selection.RepeatedStratifiedKFold`.
:issue:`8120` by `Neeraj Gangwar`_.

+ - Added a scorer based on :class:`metrics.explained_variance_score`.
+   :issue:`9259` by `Hanmin Qin <https://github.com/qinhanmin2014>`_.
+
Miscellaneous

- Validation that input data contains no NaN or inf can now be suppressed
sklearn/metrics/scorer.py (5 additions, 2 deletions)
@@ -26,7 +26,8 @@
from . import (r2_score, median_absolute_error, mean_absolute_error,
               mean_squared_error, mean_squared_log_error, accuracy_score,
               f1_score, roc_auc_score, average_precision_score,
-                precision_score, recall_score, log_loss)
+                precision_score, recall_score, log_loss,
+                explained_variance_score)

from .cluster import adjusted_rand_score
from .cluster import homogeneity_score
@@ -463,6 +464,7 @@ def make_scorer(score_func, greater_is_better=True, needs_proba=False,


# Standard regression scores
+ explained_variance_scorer = make_scorer(explained_variance_score)
r2_scorer = make_scorer(r2_score)
neg_mean_squared_error_scorer = make_scorer(mean_squared_error,
                                            greater_is_better=False)
@@ -525,7 +527,8 @@ def make_scorer(score_func, greater_is_better=True, needs_proba=False,
fowlkes_mallows_scorer = make_scorer(fowlkes_mallows_score)


- SCORERS = dict(r2=r2_scorer,
+ SCORERS = dict(explained_variance=explained_variance_scorer,
+                r2=r2_scorer,
               neg_median_absolute_error=neg_median_absolute_error_scorer,
               neg_mean_absolute_error=neg_mean_absolute_error_scorer,
               neg_mean_squared_error=neg_mean_squared_error_scorer,
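
As an aside (illustration only, not part of the diff), the new explained_variance_scorer is simply make_scorer(explained_variance_score) with the default greater_is_better=True, so, unlike the error-based scorers above, it needs no sign flipping. A minimal sketch of the equivalence, assuming a Ridge model fitted on synthetic data:

from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.metrics import explained_variance_score, make_scorer

X, y = make_regression(n_samples=200, n_features=5, random_state=0)
model = Ridge().fit(X, y)

# A scorer takes (estimator, X, y) and applies the underlying metric to the predictions.
scorer = make_scorer(explained_variance_score)
print(scorer(model, X, y))
print(explained_variance_score(y, model.predict(X)))  # same value via the plain metric
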
sklearn/metrics/tests/test_score_objects.py (4 additions, 4 deletions)
@@ -29,7 +29,6 @@
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
- from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
@@ -42,8 +41,9 @@
from sklearn.externals import joblib


- REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
-                       'neg_mean_squared_error', 'neg_mean_squared_log_error',
+ REGRESSION_SCORERS = ['explained_variance', 'r2',
+                       'neg_mean_absolute_error', 'neg_mean_squared_error',
+                       'neg_mean_squared_log_error',
                      'neg_median_absolute_error', 'mean_absolute_error',
                      'mean_squared_error', 'median_absolute_error']

@@ -68,7 +68,7 @@

def _make_estimators(X_train, y_train, y_ml_train):
    # Make estimators that make sense to test various scoring methods
-     sensible_regr = DummyRegressor(strategy='median')
+     sensible_regr = DecisionTreeRegressor(random_state=0)
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
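
One likely motivation for replacing DummyRegressor with DecisionTreeRegressor in the test fixture (an inference; the commit itself does not state it): a constant prediction explains none of the target's variance, so explained_variance_score returns 0.0 for it and would not exercise the new scorer in a meaningful way. A quick check on a toy target:

from sklearn.metrics import explained_variance_score

y_true = [1.0, 2.0, 3.0, 4.0]
# A constant ("dummy") prediction leaves all of the variance unexplained.
print(explained_variance_score(y_true, [2.5, 2.5, 2.5, 2.5]))  # 0.0
# A prediction that tracks y_true explains nearly all of it.
print(explained_variance_score(y_true, [1.1, 1.9, 3.2, 3.8]))  # ~0.98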
