diff --git a/code/compute_results.py b/code/compute_results.py
index 5f7fac2c7718cf887d3d83a5b3a7eb9cdebfb9d9..3bd2f8a22cab9fe5dc224bd36fbc90e154b9f558 100644
--- a/code/compute_results.py
+++ b/code/compute_results.py
@@ -420,26 +420,37 @@ if __name__ == "__main__":
         omp_with_params_train_scores, omp_with_params_dev_scores, omp_with_params_test_scores, _, \
             omp_with_params_experiment_score_metric = extract_scores_across_seeds_and_extracted_forest_sizes(
                 args.models_dir, args.results_dir, args.experiment_ids[2])
-        # omp_with_params
+        # omp_with_params_without_weights
+        logger.info('Loading omp_with_params_without_weights experiment scores...')
+        omp_with_params_without_weights_train_scores, omp_with_params_without_weights_dev_scores, omp_with_params_without_weights_test_scores, _, \
+            omp_with_params_experiment_score_metric = extract_scores_across_seeds_and_extracted_forest_sizes(
+                args.models_dir, args.results_dir, args.experiment_ids[2], weights=False)
+        # kmeans_with_params
         logger.info('Loading kmeans_with_params experiment scores...')
         kmeans_with_params_train_scores, kmeans_with_params_dev_scores, kmeans_with_params_test_scores, _, \
             kmeans_with_params_experiment_score_metric = extract_scores_across_seeds_and_extracted_forest_sizes(
                 args.models_dir, args.results_dir, args.experiment_ids[3])
-        
+        # similarity_with_params
+        logger.info('Loading similarity_with_params experiment scores...')
+        similarity_with_params_train_scores, similarity_with_params_dev_scores, similarity_with_params_test_scores, _, \
+            similarity_with_params_experiment_score_metric = extract_scores_across_seeds_and_extracted_forest_sizes(
+                args.models_dir, args.results_dir, args.experiment_ids[4])
+
         # Sanity check on the metrics retreived
         if not (base_with_params_experiment_score_metric == random_with_params_experiment_score_metric
-            == omp_with_params_experiment_score_metric == kmeans_with_params_experiment_score_metric):
+            == omp_with_params_experiment_score_metric == kmeans_with_params_experiment_score_metric
+            == similarity_with_params_experiment_score_metric):
             raise ValueError('Score metrics of all experiments must be the same.')
         experiments_score_metric = base_with_params_experiment_score_metric
 
-        output_path = os.path.join(args.results_dir, args.dataset_name, 'stage5_kmeans')
+        output_path = os.path.join(args.results_dir, args.dataset_name, 'stage5')
         pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)
 
         Plotter.plot_stage2_losses(
             file_path=output_path + os.sep + 'losses.png',
             all_experiment_scores=[base_with_params_test_scores, random_with_params_test_scores, omp_with_params_test_scores,
-                kmeans_with_params_test_scores],
-            all_labels=['base', 'random', 'omp', 'kmeans'],
+                omp_with_params_without_weights_test_scores, kmeans_with_params_test_scores, similarity_with_params_test_scores],
+            all_labels=['base', 'random', 'omp', 'omp_without_weights', 'kmeans', 'similarity'],
             x_value=with_params_extracted_forest_sizes,
             xlabel='Number of trees extracted',
             ylabel=experiments_score_metric,