Commit a723be1d authored by Baptiste Bauvin

Cleared metrics

parent b2a327fe
Pipeline #4092 passed
Showing changed files with 41 additions and 376 deletions
@@ -25,11 +25,7 @@ def score(y_true, y_pred, multiclass=False, **kwargs):
     Returns:
         Weighted accuracy score for y_true, y_pred"""
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    score = metric(y_true, y_pred, sample_weight=sample_weight)
+    score = metric(y_true, y_pred, **kwargs)
     return score
@@ -39,19 +35,10 @@ def get_scorer(**kwargs):
     Returns:
         A weighted sklearn scorer for accuracy"""
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
     return make_scorer(metric, greater_is_better=True,
-                       sample_weight=sample_weight)
+                       **kwargs)


 def get_config(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    config_string = "Accuracy score using " + str(
-        sample_weight) + " as sample_weights (higher is better)"
+    config_string = "Accuracy score using {}, (higher is better)".format(kwargs)
     return config_string
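Below the hunk, a minimal usage sketch (not part of the commit) of the simplified wrapper pattern, assuming `metric` is bound to sklearn.metrics.accuracy_score in the module header that is not shown here; keyword arguments such as sample_weight are now forwarded straight to the sklearn call.

import numpy as np
from sklearn.metrics import accuracy_score as metric, make_scorer

def score(y_true, y_pred, multiclass=False, **kwargs):
    # forward everything (e.g. sample_weight) to the sklearn metric
    return metric(y_true, y_pred, **kwargs)

y_true = np.array([0, 1, 1, 0])
y_pred = np.array([0, 1, 0, 0])
print(score(y_true, y_pred))                              # 0.75
print(score(y_true, y_pred, sample_weight=[1, 1, 2, 1]))  # 0.6, weights forwarded via **kwargs
scorer = make_scorer(metric, greater_is_better=True)      # same idea as get_scorer()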
@@ -14,75 +14,15 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype

-def score(y_true, y_pred, multiclass=True, **kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except:
-        sample_weight = None
-    try:
-        labels = kwargs["1"]
-    except:
-        labels = None
-    try:
-        pos_label = kwargs["2"]
-    except:
-        pos_label = 1
-    try:
-        average = kwargs["3"]
-    except Exception:
-        if multiclass:
-            average = "micro"
-        else:
-            average = "micro"
-    score = metric(y_true, y_pred, sample_weight=sample_weight, labels=labels,
-                   pos_label=pos_label, average=average)
+def score(y_true, y_pred, multiclass=True, average='micro', **kwargs):
+    score = metric(y_true, y_pred, average=average, **kwargs)
     return score

-def get_scorer(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        labels = kwargs["1"]
-    except Exception:
-        labels = None
-    try:
-        pos_label = kwargs["2"]
-    except Exception:
-        pos_label = 1
-    try:
-        average = kwargs["3"]
-    except:
-        average = "micro"
-    return make_scorer(metric, greater_is_better=True,
-                       sample_weight=sample_weight, labels=labels,
-                       pos_label=pos_label, average=average)
+def get_scorer(average="micro", **kwargs):
+    return make_scorer(metric, greater_is_better=True, average=average, **kwargs)

-def get_config(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        labels = kwargs["1"]
-    except Exception:
-        labels = None
-    try:
-        pos_label = kwargs["2"]
-    except Exception:
-        pos_label = 1
-    try:
-        average = kwargs["3"]
-    except Exception:
-        average = "micro"
-    config_string = "F1 score using " + str(
-        sample_weight) + " as sample_weights, " + str(
-        labels) + " as labels, " + str(
-        pos_label) \
-        + " as pos_label, " + average + " as average (higher is better)"
+def get_config(average="micro", **kwargs, ):
+    config_string = "F1 score using average: {}, {} (higher is better)".format(average, kwargs)
     return config_string
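A hedged sketch of the new F1 signature, assuming `metric` is sklearn.metrics.f1_score as the module name suggests: average now defaults to 'micro', and binary-only options such as pos_label can still travel through **kwargs.

import numpy as np
from sklearn.metrics import f1_score as metric

def score(y_true, y_pred, multiclass=True, average='micro', **kwargs):
    return metric(y_true, y_pred, average=average, **kwargs)

y_true = np.array([0, 0, 1, 1, 2])
y_pred = np.array([0, 1, 1, 1, 2])
print(score(y_true, y_pred))                   # micro-averaged F1, 0.8 here
print(score(y_true, y_pred, average='macro'))  # unweighted mean of per-class F1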
@@ -10,86 +10,16 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype

-def score(y_true, y_pred, multiclass=True, **kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        beta = kwargs["1"]
-    except Exception:
-        beta = 10.0
-    try:
-        labels = kwargs["2"]
-    except Exception:
-        labels = None
-    try:
-        pos_label = kwargs["3"]
-    except Exception:
-        pos_label = 1
-    try:
-        average = kwargs["4"]
-    except Exception:
-        if multiclass:
-            average = "micro"
-        else:
-            average = "binary"
-    score = metric(y_true, y_pred, beta, sample_weight=sample_weight,
-                   labels=labels, pos_label=pos_label,
-                   average=average)
+def score(y_true, y_pred, beta=2.0, average="micro", **kwargs):
+    score = metric(y_true, y_pred, beta=beta, average=average, **kwargs)
     return score

-def get_scorer(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        beta = kwargs["1"]
-    except Exception:
-        beta = 1.0
-    try:
-        labels = kwargs["2"]
-    except Exception:
-        labels = None
-    try:
-        pos_label = kwargs["3"]
-    except Exception:
-        pos_label = 1
-    try:
-        average = kwargs["4"]
-    except Exception:
-        average = "micro"
+def get_scorer(beta=2.0, average="micro", **kwargs):
     return make_scorer(metric, greater_is_better=True, beta=beta,
-                       sample_weight=sample_weight, labels=labels,
-                       pos_label=pos_label, average=average)
+                       average=average, **kwargs)

-def get_config(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        beta = kwargs["1"]
-    except Exception:
-        beta = 1.0
-    try:
-        labels = kwargs["1"]
-    except Exception:
-        labels = None
-    try:
-        pos_label = kwargs["2"]
-    except Exception:
-        pos_label = 1
-    try:
-        average = kwargs["3"]
-    except Exception:
-        average = "binary"
-    config_string = "F-beta score using " + str(
-        sample_weight) + " as sample_weights, " + str(
-        labels) + " as labels, " + str(pos_label) \
-        + " as pos_label, " + average + " as average, " + str(
-        beta) + " as beta (higher is better)"
+def get_config(beta=2.0, average="micro", **kwargs):
+    config_string = "F-beta score using beta: {}, average: {}, {} (higher is better)".format(beta, average, kwargs)
     return config_string
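Illustrative only, assuming `metric` wraps sklearn.metrics.fbeta_score: the refactor fixes beta at a default of 2.0 instead of reading it from kwargs["1"], and anything else (labels, pos_label, sample_weight) passes through unchanged.

import numpy as np
from sklearn.metrics import fbeta_score as metric

def score(y_true, y_pred, beta=2.0, average="micro", **kwargs):
    # beta > 1 weights recall more heavily than precision
    return metric(y_true, y_pred, beta=beta, average=average, **kwargs)

y_true = np.array([0, 1, 1, 1])
y_pred = np.array([0, 1, 0, 1])
print(score(y_true, y_pred))            # F2 with micro averaging
print(score(y_true, y_pred, beta=0.5))  # favours precision instead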
@@ -10,27 +10,14 @@ __status__ = "Prototype"  # Production, Development, Prototype

 def score(y_true, y_pred, multiclass=False, **kwargs):
-    try:
-        classes = kwargs["0"]
-    except Exception:
-        classes = None
-    score = metric(y_true, y_pred)
+    score = metric(y_true, y_pred, **kwargs)
     return score

 def get_scorer(**kwargs):
-    try:
-        classes = kwargs["0"]
-    except Exception:
-        classes = None
-    return make_scorer(metric, greater_is_better=False, classes=classes)
+    return make_scorer(metric, greater_is_better=False, **kwargs)

 def get_config(**kwargs):
-    try:
-        classes = kwargs["0"]
-    except Exception:
-        classes = None
-    config_string = "Hamming loss using " + str(
-        classes) + " as classes (lower is better)"
+    config_string = "Hamming loss using {} (lower is better)".format(kwargs)
     return config_string
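A small sketch (assuming `metric` is sklearn.metrics.hamming_loss): the unused `classes` extraction is gone, and because this is a loss the scorer keeps greater_is_better=False.

import numpy as np
from sklearn.metrics import hamming_loss as metric, make_scorer

y_true = np.array([0, 1, 1, 0])
y_pred = np.array([0, 1, 0, 0])
print(metric(y_true, y_pred))                              # 0.25: fraction of wrong labels
print(metric(y_true, y_pred, sample_weight=[1, 1, 3, 1]))  # 0.5: weights forwarded via **kwargs
scorer = make_scorer(metric, greater_is_better=False)      # lower is better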
@@ -10,28 +10,15 @@ __status__ = "Prototype"  # Production, Development, Prototype

 def score(y_true, y_pred, multiclass=False, **kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    score = metric(y_true, y_pred, sample_weight=sample_weight)
+    score = metric(y_true, y_pred, **kwargs)
     return score

 def get_scorer(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
     return make_scorer(metric, greater_is_better=True,
-                       sample_weight=sample_weight)
+                       **kwargs)

 def get_config(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    config_string = "Jaccard_similarity score using " + str(
-        sample_weight) + " as sample_weights (higher is better)"
+    config_string = "Jaccard_similarity score using {} (higher is better)".format(kwargs)
     return config_string
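Sketch under the assumption that `metric` is sklearn's Jaccard metric (jaccard_similarity_score in the sklearn versions this code targets, jaccard_score in current releases); the wrapper now simply forwards keyword arguments.

import numpy as np
from sklearn.metrics import jaccard_score  # jaccard_similarity_score in older sklearn

y_true = np.array([0, 1, 1, 0])
y_pred = np.array([0, 1, 0, 0])
# binary Jaccard: |intersection| / |union| of predicted and true positives
print(jaccard_score(y_true, y_pred))                              # 1 / 2 = 0.5
print(jaccard_score(y_true, y_pred, sample_weight=[1, 1, 2, 1]))  # weights forwarded via **kwargs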
@@ -10,41 +10,15 @@ __status__ = "Prototype"  # Production, Development, Prototype

 def score(y_true, y_pred, multiclass=False, **kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        eps = kwargs["1"]
-    except Exception:
-        eps = 1e-15
-    score = metric(y_true, y_pred, sample_weight=sample_weight, eps=eps)
+    score = metric(y_true, y_pred, **kwargs)
     return score

 def get_scorer(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        eps = kwargs["1"]
-    except Exception:
-        eps = 1e-15
     return make_scorer(metric, greater_is_better=False,
-                       sample_weight=sample_weight, eps=eps)
+                       **kwargs)

 def get_config(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        eps = kwargs["1"]
-    except Exception:
-        eps = 1e-15
-    config_string = "Log loss using " + str(
-        sample_weight) + " as sample_weights, " + str(
-        eps) + " as eps (lower is better)"
+    config_string = "Log loss using {} (lower is better)".format(kwargs)
    return config_string
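A hedged sketch assuming `metric` is sklearn.metrics.log_loss; log loss expects predicted probabilities rather than hard labels, and options such as sample_weight or eps are now passed through **kwargs.

import numpy as np
from sklearn.metrics import log_loss as metric

y_true = np.array([0, 1, 1])
y_proba = np.array([[0.9, 0.1],
                    [0.2, 0.8],
                    [0.3, 0.7]])
print(metric(y_true, y_proba))                           # mean negative log-likelihood
print(metric(y_true, y_proba, sample_weight=[1, 2, 1]))  # kwargs forwarded by the wrapper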
@@ -7,73 +7,16 @@ warnings.warn("the precision_score module is deprecated", DeprecationWarning,
 __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype

-def score(y_true, y_pred, multiclass=False, **kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        labels = kwargs["1"]
-    except Exception:
-        labels = None
-    try:
-        pos_label = kwargs["2"]
-    except Exception:
-        pos_label = 1
-    try:
-        average = kwargs["3"]
-    except Exception:
-        if multiclass:
-            average = "micro"
-        else:
-            average = "binary"
-    score = metric(y_true, y_pred, sample_weight=sample_weight, labels=labels,
-                   pos_label=pos_label, average=average)
+def score(y_true, y_pred, average='micro', multiclass=False, **kwargs):
+    score = metric(y_true, y_pred, average=average, **kwargs)
     return score

-def get_scorer(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        labels = kwargs["1"]
-    except Exception:
-        labels = None
-    try:
-        pos_label = kwargs["2"]
-    except Exception:
-        pos_label = 1
-    try:
-        average = kwargs["3"]
-    except Exception:
-        average = "binary"
+def get_scorer(average='micro', **kwargs):
     return make_scorer(metric, greater_is_better=True,
-                       sample_weight=sample_weight, labels=labels,
-                       pos_label=pos_label,
-                       average=average)
+                       average=average, **kwargs)

-def get_config(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except:
-        sample_weight = None
-    try:
-        labels = kwargs["1"]
-    except Exception:
-        labels = None
-    try:
-        pos_label = kwargs["2"]
-    except Exception:
-        pos_label = 1
-    try:
-        average = kwargs["3"]
-    except:
-        average = "binary"
-    config_string = "Precision score using " + str(
-        sample_weight) + " as sample_weights, " + str(
-        labels) + " as labels, " + str(pos_label) \
-        + " as pos_label, " + average + " as average (higher is better)"
+def get_config(average='micro', **kwargs):
+    config_string = "Precision score using average: {}, {} (higher is better)".format(average, kwargs)
     return config_string
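Sketch assuming `metric` is sklearn.metrics.precision_score (the module carries the deprecation warning shown above): average now defaults to 'micro' rather than being reconstructed from positional-style kwargs.

import numpy as np
from sklearn.metrics import precision_score as metric

y_true = np.array([0, 1, 2, 2])
y_pred = np.array([0, 1, 2, 1])
print(metric(y_true, y_pred, average='micro'))  # 0.75, equals accuracy for single-label data
print(metric(y_true, y_pred, average='macro'))  # unweighted mean of per-class precision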
@@ -9,74 +9,16 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype"  # Production, Development, Prototype

-def score(y_true, y_pred, multiclass=False, **kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        labels = kwargs["1"]
-    except Exception:
-        labels = None
-    try:
-        pos_label = kwargs["2"]
-    except Exception:
-        pos_label = 1
-    try:
-        average = kwargs["3"]
-    except Exception:
-        if multiclass:
-            average = "micro"
-        else:
-            average = "binary"
-    score = metric(y_true, y_pred, sample_weight=sample_weight, labels=labels,
-                   pos_label=pos_label, average=average)
+def score(y_true, y_pred, average='micro', **kwargs):
+    score = metric(y_true, y_pred, average=average, **kwargs)
     return score

-def get_scorer(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        labels = kwargs["1"]
-    except Exception:
-        labels = None
-    try:
-        pos_label = kwargs["2"]
-    except Exception:
-        pos_label = 1
-    try:
-        average = kwargs["3"]
-    except Exception:
-        average = "binary"
+def get_scorer(average='micro', **kwargs):
     return make_scorer(metric, greater_is_better=True,
-                       sample_weight=sample_weight, labels=labels,
-                       pos_label=pos_label,
-                       average=average)
+                       average=average, **kwargs)

-def get_config(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    try:
-        labels = kwargs["1"]
-    except Exception:
-        labels = None
-    try:
-        pos_label = kwargs["2"]
-    except Exception:
-        pos_label = 1
-    try:
-        average = kwargs["3"]
-    except Exception:
-        average = "binary"
-    configString = "Recall score using " + str(
-        sample_weight) + " as sample_weights, " + str(
-        labels) + " as labels, " + str(pos_label) \
-        + " as pos_label, " + average + "as average (higher is " \
-        "better) "
+def get_config(average="micro", **kwargs):
+    configString = "Recall score using average: {}, {} (higher is better)".format(average, kwargs)
     return configString
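To round out the pattern, a sketch of how the refactored scorer builder can be consumed, assuming `metric` is sklearn.metrics.recall_score; the dataset and classifier below are placeholders, not part of the commit.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.metrics import recall_score as metric, make_scorer
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

def get_scorer(average="micro", **kwargs):
    return make_scorer(metric, greater_is_better=True, average=average, **kwargs)

X, y = make_classification(n_samples=60, n_classes=3, n_informative=4, random_state=0)
clf = DecisionTreeClassifier(max_depth=2, random_state=0)
# the scorer object plugs into any sklearn utility that accepts `scoring`
print(cross_val_score(clf, X, y, cv=3, scoring=get_scorer()))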
@@ -11,17 +11,6 @@ __status__ = "Prototype"  # Production, Development, Prototype

 def score(y_true, y_pred, multiclass=False, **kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception :
-        sample_weight = None
-    try:
-        average = kwargs["1"]
-    except Exception:
-        if multiclass:
-            average = "micro"
-        else:
-            average = None
     if multiclass:
         mlb = MultiLabelBinarizer()
         y_true = mlb.fit_transform([(label) for label in y_true])
......
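The surviving lines hint at the multiclass path: labels are one-hot binarized before the ROC AUC computation. A purely illustrative sketch of that idea (the names, shapes, and probability matrix here are assumptions, not this module's API):

import numpy as np
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import roc_auc_score

y_true = np.array([0, 1, 2, 1])
y_score = np.array([[0.8, 0.1, 0.1],
                    [0.2, 0.6, 0.2],
                    [0.1, 0.2, 0.7],
                    [0.3, 0.4, 0.3]])
# binarize labels into an indicator matrix, then score against class probabilities
y_true_bin = MultiLabelBinarizer().fit_transform([(label,) for label in y_true])
print(roc_auc_score(y_true_bin, y_score, average="micro"))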
@@ -11,28 +11,15 @@ __status__ = "Prototype"  # Production, Development, Prototype

 def score(y_true, y_pred, multiclass=False, **kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    score = metric(y_true, y_pred, sample_weight=sample_weight)
+    score = metric(y_true, y_pred, **kwargs)
     return score

 def get_scorer(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
     return make_scorer(metric, greater_is_better=False,
-                       sample_weight=sample_weight)
+                       **kwargs)

 def get_config(**kwargs):
-    try:
-        sample_weight = kwargs["0"]
-    except Exception:
-        sample_weight = None
-    configString = "Zero_one loss using " + str(
-        sample_weight) + " as sample_weights (lower is better)"
+    configString = "Zero_one loss using {} (lower is better)".format(kwargs)
     return configString
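Same forwarding pattern, assuming `metric` is sklearn.metrics.zero_one_loss; since it is a loss, the scorer is built with greater_is_better=False so that model selection minimises it.

import numpy as np
from sklearn.metrics import zero_one_loss as metric, make_scorer

y_true = np.array([0, 1, 1, 0])
y_pred = np.array([0, 1, 0, 0])
print(metric(y_true, y_pred))                   # 0.25: fraction misclassified
print(metric(y_true, y_pred, normalize=False))  # 1: raw count, forwarded via **kwargs
scorer = make_scorer(metric, greater_is_better=False)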
@@ -14,8 +14,7 @@ class Test_genTestFoldsPreds(unittest.TestCase):
         cls.random_state = np.random.RandomState(42)
         cls.X_train = cls.random_state.random_sample((31, 10))
         cls.y_train = np.ones(31, dtype=int)
-        cls.KFolds = StratifiedKFold(n_splits=3, random_state=cls.random_state,
-                                     shuffle=True)
+        cls.KFolds = StratifiedKFold(n_splits=3,)
         cls.estimator = DecisionTreeClassifier(max_depth=1)
@@ -30,5 +29,5 @@ class Test_genTestFoldsPreds(unittest.TestCase):
                                              cls.estimator)
         cls.assertEqual(testFoldsPreds.shape, (3, 10))
         np.testing.assert_array_equal(testFoldsPreds[0], np.array(
-            [ 1, 1, 1, 1, -1, -1, 1, -1, 1, 1]))
+            [ 1, 1, -1, -1, 1, 1, -1, 1, -1, 1]))
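A note on why the expected fold predictions change (an inference from the hunk, not part of the commit): without shuffle=True, StratifiedKFold splits deterministically in index order, so no random_state is needed and the first test fold differs from the shuffled version.

import numpy as np
from sklearn.model_selection import StratifiedKFold

y = np.ones(31, dtype=int)   # single class, as in the test fixture
X = np.zeros((31, 1))
first_test_fold = next(iter(StratifiedKFold(n_splits=3).split(X, y)))[1]
print(first_test_fold)       # contiguous block of indices, reproducible without a seed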