From d33b9961d9ae0eb082de1371880b4d0b47f4f973 Mon Sep 17 00:00:00 2001
From: Baptiste Bauvin <baptiste.bauvin.1@ulaval.ca>
Date: Tue, 21 Nov 2017 08:38:18 -0500
Subject: [PATCH] WE work

---
 Code/MonoMultiViewClassifiers/Metrics/accuracy_score.py   | 2 +-
 Code/MonoMultiViewClassifiers/Metrics/f1_score.py         | 4 ++--
 Code/MonoMultiViewClassifiers/Metrics/fbeta_score.py      | 4 ++--
 Code/MonoMultiViewClassifiers/Metrics/hamming_loss.py     | 2 +-
 .../Metrics/jaccard_similarity_score.py                   | 2 +-
 Code/MonoMultiViewClassifiers/Metrics/log_loss.py         | 2 +-
 .../MonoMultiViewClassifiers/Metrics/matthews_corrcoef.py | 2 +-
 Code/MonoMultiViewClassifiers/Metrics/precision_score.py  | 4 ++--
 Code/MonoMultiViewClassifiers/Metrics/recall_score.py     | 4 ++--
 Code/MonoMultiViewClassifiers/Metrics/roc_auc_score.py    | 6 +++---
 Code/MonoMultiViewClassifiers/Metrics/zero_one_loss.py    | 2 +-
 Code/MonoMultiViewClassifiers/ResultAnalysis.py           | 8 +++-----
 12 files changed, 20 insertions(+), 22 deletions(-)

diff --git a/Code/MonoMultiViewClassifiers/Metrics/accuracy_score.py b/Code/MonoMultiViewClassifiers/Metrics/accuracy_score.py
index a51d11b7..86762167 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/accuracy_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/accuracy_score.py
@@ -11,7 +11,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     """Arguments:
     y_true: real labels
     y_pred predicted labels
diff --git a/Code/MonoMultiViewClassifiers/Metrics/f1_score.py b/Code/MonoMultiViewClassifiers/Metrics/f1_score.py
index 78e2d2ba..13633903 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/f1_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/f1_score.py
@@ -11,7 +11,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
@@ -27,7 +27,7 @@ def score(y_true, y_pred, **kwargs):
     try:
         average = kwargs["3"]
     except:
-        if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+        if multiclass:
             average = "micro"
         else:
             average = "binary"
diff --git a/Code/MonoMultiViewClassifiers/Metrics/fbeta_score.py b/Code/MonoMultiViewClassifiers/Metrics/fbeta_score.py
index 6fc91278..aa2c9298 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/fbeta_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/fbeta_score.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
@@ -26,7 +26,7 @@ def score(y_true, y_pred, **kwargs):
     try:
         average = kwargs["4"]
     except:
-        if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+        if multiclass:
             average = "micro"
         else:
             average = "binary"
diff --git a/Code/MonoMultiViewClassifiers/Metrics/hamming_loss.py b/Code/MonoMultiViewClassifiers/Metrics/hamming_loss.py
index 2ad6e26c..1adbc376 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/hamming_loss.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/hamming_loss.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         classes = kwargs["0"]
     except:
diff --git a/Code/MonoMultiViewClassifiers/Metrics/jaccard_similarity_score.py b/Code/MonoMultiViewClassifiers/Metrics/jaccard_similarity_score.py
index fff71707..38500825 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/jaccard_similarity_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/jaccard_similarity_score.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
diff --git a/Code/MonoMultiViewClassifiers/Metrics/log_loss.py b/Code/MonoMultiViewClassifiers/Metrics/log_loss.py
index 7a748037..4a771e42 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/log_loss.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/log_loss.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
diff --git a/Code/MonoMultiViewClassifiers/Metrics/matthews_corrcoef.py b/Code/MonoMultiViewClassifiers/Metrics/matthews_corrcoef.py
index 3f077474..5d69563f 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/matthews_corrcoef.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/matthews_corrcoef.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     score = metric(y_true, y_pred)
     return score
 
diff --git a/Code/MonoMultiViewClassifiers/Metrics/precision_score.py b/Code/MonoMultiViewClassifiers/Metrics/precision_score.py
index 09d63e64..a8087841 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/precision_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/precision_score.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
@@ -22,7 +22,7 @@ def score(y_true, y_pred, **kwargs):
     try:
         average = kwargs["3"]
     except:
-        if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+        if multiclass:
             average = "micro"
         else:
             average = "binary"
diff --git a/Code/MonoMultiViewClassifiers/Metrics/recall_score.py b/Code/MonoMultiViewClassifiers/Metrics/recall_score.py
index 194e1066..7ce329a9 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/recall_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/recall_score.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
@@ -22,7 +22,7 @@ def score(y_true, y_pred, **kwargs):
     try:
         average = kwargs["3"]
     except:
-        if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+        if multiclass:
             average = "micro"
         else:
             average = "binary"
diff --git a/Code/MonoMultiViewClassifiers/Metrics/roc_auc_score.py b/Code/MonoMultiViewClassifiers/Metrics/roc_auc_score.py
index ed03c774..4f2cc4db 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/roc_auc_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/roc_auc_score.py
@@ -7,7 +7,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
@@ -15,11 +15,11 @@ def score(y_true, y_pred, **kwargs):
     try:
         average = kwargs["1"]
     except:
-        if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+        if multiclass:
             average = "micro"
         else:
             average = None
-    if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+    if multiclass:
         mlb = MultiLabelBinarizer()
         y_true = mlb.fit_transform([(label) for label in y_true])
         y_pred = mlb.fit_transform([(label) for label in y_pred])
diff --git a/Code/MonoMultiViewClassifiers/Metrics/zero_one_loss.py b/Code/MonoMultiViewClassifiers/Metrics/zero_one_loss.py
index ea0b6478..cf632fed 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/zero_one_loss.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/zero_one_loss.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
diff --git a/Code/MonoMultiViewClassifiers/ResultAnalysis.py b/Code/MonoMultiViewClassifiers/ResultAnalysis.py
index 64e6a93f..2b72ca9d 100644
--- a/Code/MonoMultiViewClassifiers/ResultAnalysis.py
+++ b/Code/MonoMultiViewClassifiers/ResultAnalysis.py
@@ -218,9 +218,10 @@ def genMetricsScoresMulticlass(results, trueLabels, metrics, argumentsDictionari
             for classifierName, resultDictionary in iterResults.items():
                 if not "metricsScores" in resultDictionary:
                     results[iterIndex][classifierName]["metricsScores"]={}
-                trainScore = metricModule.score(trueLabels[trainIndices],resultDictionary["labels"][trainIndices])
+                trainScore = metricModule.score(trueLabels[trainIndices],resultDictionary["labels"][trainIndices], multiclass=True)
                 testScore = metricModule.score(trueLabels[multiclassTestIndices],
-                                                resultDictionary["labels"][multiclassTestIndices])
+                                                resultDictionary["labels"][multiclassTestIndices],
+                                                multiclass=True)
                 results[iterIndex][classifierName]["metricsScores"][metric[0]] = [trainScore, testScore]
 
 
@@ -720,8 +721,6 @@ def resultAnalysis(benchmark, results, name, times, metrics, directory, minSize=
             logging.debug("Done:\t Score graph generation for " + metric[0])
 
 
-
-
 def analyzeLabels(labelsArrays, realLabels, results, directory, minSize = 10):
     """Used to generate a graph showing errors on each example depending on classifier"""
     logging.debug("Start:\t Label analysis figure generation")
@@ -807,7 +806,6 @@ def analyzeIterLabels(labelsAnalysisList, directory, classifiersNames, minSize=1
     logging.debug("Done:\t Global error by example figure generation")
 
 
-
 def genFig(iterResults, metric, nbResults, names, nbMono, minSize=10):
     """Used to generate the bar graph representing the mean scores of each classifiers
     if multiple iteration with different random states"""
-- 
GitLab
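
Reviewer sketch (not part of the patch): a minimal, standalone illustration of the calling
convention this change introduces, assuming the sklearn-backed wrapper shape visible in the
f1_score.py hunks above. The module layout, the example labels, and the use of kwargs.get in
place of the repo's try/except lookup are illustrative assumptions only.

# sketch.py -- illustrative only
from sklearn.metrics import f1_score as metric


def score(y_true, y_pred, multiclass=False, **kwargs):
    # Loosely mirrors the patched f1_score.py wrapper: when no explicit
    # "average" is supplied (positional kwarg "3"), fall back to "micro"
    # averaging for multiclass problems and "binary" otherwise.
    average = kwargs.get("3", "micro" if multiclass else "binary")
    return metric(y_true, y_pred, average=average)


if __name__ == "__main__":
    # Callers such as genMetricsScoresMulticlass now state the setting
    # explicitly instead of each metric re-deriving it from the label sets.
    y_true = [0, 1, 2, 2, 1, 0]
    y_pred = [0, 2, 2, 2, 1, 0]
    print(score(y_true, y_pred, multiclass=True))  # micro-averaged F1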