diff --git a/Code/MonoMultiViewClassifiers/Metrics/accuracy_score.py b/Code/MonoMultiViewClassifiers/Metrics/accuracy_score.py
index a51d11b7123e885020c67dfbd4ac8adffc2ec300..8676216759cfdcc297939c8b04e83b32340bb803 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/accuracy_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/accuracy_score.py
@@ -11,7 +11,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     """Arguments:
     y_true: real labels
     y_pred predicted labels
diff --git a/Code/MonoMultiViewClassifiers/Metrics/f1_score.py b/Code/MonoMultiViewClassifiers/Metrics/f1_score.py
index 78e2d2ba59c507ae837302a2b773fb6b68cda3d6..13633903807e07ccca84f0c3e5f81b79b51741ec 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/f1_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/f1_score.py
@@ -11,7 +11,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
@@ -27,7 +27,7 @@ def score(y_true, y_pred, **kwargs):
     try:
         average = kwargs["3"]
     except:
-        if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+        if multiclass:
             average = "micro"
         else:
             average = "binary"
diff --git a/Code/MonoMultiViewClassifiers/Metrics/fbeta_score.py b/Code/MonoMultiViewClassifiers/Metrics/fbeta_score.py
index 6fc9127890153971b025ac01eb15d821001dd71d..aa2c9298720e44c18b45277aaabb4732032f0a20 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/fbeta_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/fbeta_score.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
@@ -26,7 +26,7 @@ def score(y_true, y_pred, **kwargs):
     try:
         average = kwargs["4"]
     except:
-        if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+        if multiclass:
             average = "micro"
         else:
             average = "binary"
diff --git a/Code/MonoMultiViewClassifiers/Metrics/hamming_loss.py b/Code/MonoMultiViewClassifiers/Metrics/hamming_loss.py
index 2ad6e26c9991464f58034639abf666700f2a7c1c..1adbc376eabef4c3ec3659e44a6734be22cc7010 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/hamming_loss.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/hamming_loss.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         classes = kwargs["0"]
     except:
diff --git a/Code/MonoMultiViewClassifiers/Metrics/jaccard_similarity_score.py b/Code/MonoMultiViewClassifiers/Metrics/jaccard_similarity_score.py
index fff71707e7f517178245152482aa13a2e8a73e96..3850082516eec4f44d0064c498411b34093db2b8 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/jaccard_similarity_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/jaccard_similarity_score.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
diff --git a/Code/MonoMultiViewClassifiers/Metrics/log_loss.py b/Code/MonoMultiViewClassifiers/Metrics/log_loss.py
index 7a748037c028881ee926b54053971a2a3860778b..4a771e42e0a444b590f7340cf60b01b3924d863e 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/log_loss.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/log_loss.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
diff --git a/Code/MonoMultiViewClassifiers/Metrics/matthews_corrcoef.py b/Code/MonoMultiViewClassifiers/Metrics/matthews_corrcoef.py
index 3f07747446a3a6969321b202df46007d5a19891d..5d69563f7fe4938ed77e3f205d76fd1dac7acc12 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/matthews_corrcoef.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/matthews_corrcoef.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     score = metric(y_true, y_pred)
     return score
 
diff --git a/Code/MonoMultiViewClassifiers/Metrics/precision_score.py b/Code/MonoMultiViewClassifiers/Metrics/precision_score.py
index 09d63e64e6b1f662abc74fb423a1ee85c42e2238..a80878415893e3886dbd15400b8aea17ded20e7e 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/precision_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/precision_score.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
@@ -22,7 +22,7 @@ def score(y_true, y_pred, **kwargs):
     try:
         average = kwargs["3"]
     except:
-        if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+        if multiclass:
             average = "micro"
         else:
             average = "binary"
diff --git a/Code/MonoMultiViewClassifiers/Metrics/recall_score.py b/Code/MonoMultiViewClassifiers/Metrics/recall_score.py
index 194e1066c1843e0f4ce72ed3f40a7e7a2e47d02f..7ce329a933ec3852fa1a3bccf1de6fa65dc90898 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/recall_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/recall_score.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
@@ -22,7 +22,7 @@ def score(y_true, y_pred, **kwargs):
     try:
         average = kwargs["3"]
     except:
-        if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+        if multiclass:
             average = "micro"
         else:
             average = "binary"
diff --git a/Code/MonoMultiViewClassifiers/Metrics/roc_auc_score.py b/Code/MonoMultiViewClassifiers/Metrics/roc_auc_score.py
index ed03c77471cd848c73e833f452d6dc1cc67ae111..4f2cc4dbf0d4f33b27e7072bed9053d22be533d0 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/roc_auc_score.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/roc_auc_score.py
@@ -7,7 +7,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
@@ -15,11 +15,11 @@ def score(y_true, y_pred, **kwargs):
     try:
         average = kwargs["1"]
     except:
-        if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+        if multiclass:
             average = "micro"
         else:
             average = None
-    if set(y_true) != {0,1} or (set(y_pred) != {0,1} and set(y_pred) != {0} and set(y_pred) != {1}):
+    if multiclass:
         mlb = MultiLabelBinarizer()
         y_true = mlb.fit_transform([(label) for label in y_true])
         y_pred = mlb.fit_transform([(label) for label in y_pred])
diff --git a/Code/MonoMultiViewClassifiers/Metrics/zero_one_loss.py b/Code/MonoMultiViewClassifiers/Metrics/zero_one_loss.py
index ea0b647872e5fc19b9192ae3b9dd977b154cbddd..cf632fed02e9ed794a9ba83988907a508197978b 100644
--- a/Code/MonoMultiViewClassifiers/Metrics/zero_one_loss.py
+++ b/Code/MonoMultiViewClassifiers/Metrics/zero_one_loss.py
@@ -6,7 +6,7 @@ __author__ = "Baptiste Bauvin"
 __status__ = "Prototype" # Production, Development, Prototype
 
 
-def score(y_true, y_pred, **kwargs):
+def score(y_true, y_pred, multiclass=False, **kwargs):
     try:
         sample_weight = kwargs["0"]
     except:
diff --git a/Code/MonoMultiViewClassifiers/ResultAnalysis.py b/Code/MonoMultiViewClassifiers/ResultAnalysis.py
index 64e6a93fb5d53e7d6816d700020bb249c4371697..2b72ca9d4505d375dced9fe3c8496a62eb9c8cf9 100644
--- a/Code/MonoMultiViewClassifiers/ResultAnalysis.py
+++ b/Code/MonoMultiViewClassifiers/ResultAnalysis.py
@@ -218,9 +218,10 @@ def genMetricsScoresMulticlass(results, trueLabels, metrics, argumentsDictionari
             for classifierName, resultDictionary in iterResults.items():
                 if not "metricsScores" in resultDictionary:
                     results[iterIndex][classifierName]["metricsScores"]={}
-                trainScore = metricModule.score(trueLabels[trainIndices],resultDictionary["labels"][trainIndices])
+                trainScore = metricModule.score(trueLabels[trainIndices],resultDictionary["labels"][trainIndices], multiclass=True)
                 testScore = metricModule.score(trueLabels[multiclassTestIndices],
-                                               resultDictionary["labels"][multiclassTestIndices])
+                                               resultDictionary["labels"][multiclassTestIndices],
+                                               multiclass=True)
                 results[iterIndex][classifierName]["metricsScores"][metric[0]] = [trainScore, testScore]
 
 
@@ -720,8 +721,6 @@ def resultAnalysis(benchmark, results, name, times, metrics, directory, minSize=
         logging.debug("Done:\t Score graph generation for " + metric[0])
 
 
-
-
 def analyzeLabels(labelsArrays, realLabels, results, directory, minSize = 10):
     """Used to generate a graph showing errors on each example depending on classifier"""
     logging.debug("Start:\t Label analysis figure generation")
@@ -807,7 +806,6 @@ def analyzeIterLabels(labelsAnalysisList, directory, classifiersNames, minSize=1
    logging.debug("Done:\t Global error by example figure generation")
 
 
-
 def genFig(iterResults, metric, nbResults, names, nbMono, minSize=10):
    """Used to generate the bar graph representing the mean scores of each classifiers
    if multiple iteration with different random states"""
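Summary of the pattern this diff applies: each metric wrapper now takes an explicit multiclass flag instead of inferring the setting from the label values (set(y_true) != {0,1} ...), and callers such as genMetricsScoresMulticlass in ResultAnalysis.py pass multiclass=True. The following is a minimal illustrative sketch of that wrapper pattern, not code from the repository; the kwargs.get calls and the example labels are assumptions added here for clarity.

# Illustrative sketch only: mirrors the wrapper pattern introduced by the diff,
# using sklearn's f1_score. Names and example labels are hypothetical.
from sklearn.metrics import f1_score as metric


def score(y_true, y_pred, multiclass=False, **kwargs):
    # The caller now states whether the task is multiclass; the wrapper no
    # longer guesses it by inspecting the label sets.
    sample_weight = kwargs.get("0")
    average = kwargs.get("3")
    if average is None:
        average = "micro" if multiclass else "binary"
    return metric(y_true, y_pred, sample_weight=sample_weight, average=average)


# Hypothetical usage: a ResultAnalysis-style caller passes multiclass=True.
print(score([0, 1, 2, 2], [0, 2, 2, 1], multiclass=True))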