Commit 51ba8a0e authored by Charly Lamothe

- Unignore results;

- Even if the hyperparameters file is ignored with the skip_best_hyperparams option, still use the same forest_size so the runs stay comparable;
- Update experiment files for the stage1 wo_params experiments (using the same forest size as the with_params experiments);
- In compute_results: remove a useless folder creation; temporarily add an extracted_forest_sizes_number option to specify the number of extracted forest sizes (a hedged argparse sketch follows below, after the commit metadata); temporarily skip plotting the train and dev losses in the stage1 loss-values figure;
- In the plotter, clean up the stage1 figure generation;
- Add a first unbiased losses plot (stage1: best params vs default params on the California housing dataset).
parent 76a58da9
1 merge request: !9 Resolve "Experiment pipeline"
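
For the temporary extracted_forest_sizes_number option mentioned in the commit message, here is a minimal argparse sketch of how such a flag could look in code/compute_results.py. The option names are taken from the compute_results.py command shown in the diff below; the types, defaults and help texts are assumptions, not the repository's actual definitions:

import argparse

parser = argparse.ArgumentParser()
# Options appearing in the compute_results.py command of this commit.
parser.add_argument('--stage', type=int, help='Stage of the experiments to process.')
parser.add_argument('--experiment_ids', nargs='+', type=int, help='Ids of the experiments to aggregate.')
parser.add_argument('--dataset_name', type=str, help='Name of the dataset used by the experiments.')
# Temporary option added by this commit: number of extracted forest sizes per experiment.
# The default value of 5 matches the example command but is otherwise an assumption.
parser.add_argument('--extracted_forest_sizes_number', type=int, default=5,
                    help='Number of extracted forest sizes per experiment.')
args = parser.parse_args()
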
models/*
results/*
experiments/unnamed/
*/.kile/*
@@ -372,6 +371,3 @@ TSWLatexianTemp*
*.lpz
reports/*.pdf
# Image
*.png
@@ -107,7 +107,7 @@ python code/train.py --dataset_name=california_housing --seeds 1 2 3 --save_expe
python code/train.py --dataset_name=california_housing --seeds 1 2 3 --extraction_strategy=none --skip_best_hyperparams --save_experiment_configuration 1 none_wo_params
python code/train.py --dataset_name=california_housing --seeds 1 2 3 --extraction_strategy=random --skip_best_hyperparams --save_experiment_configuration 1 random_wo_params
python code/train.py --dataset_name=california_housing --seeds 1 2 3 --skip_best_hyperparams --save_experiment_configuration 1 omp_wo_params
-python code/compute_results.py --stage 1 --experiment_ids 1 2 3 4 5 6
+python code/compute_results.py --stage 1 --experiment_ids 1 2 3 4 5 6 --dataset_name=california_housing --extracted_forest_sizes_number=5
"""
if __name__ == "__main__":
load_dotenv(find_dotenv('.env'))
@@ -160,10 +160,14 @@ if __name__ == "__main__":
    logger = LoggerFactory.create(LOG_PATH, os.path.basename(__file__))
    hyperparameters_path = os.path.join('experiments', args.dataset_name, 'stage1', 'params.json')
-   if os.path.exists(hyperparameters_path) and not args.skip_best_hyperparams:
+   if os.path.exists(hyperparameters_path):
        logger.info("Hyperparameters found for this dataset at '{}'".format(hyperparameters_path))
        with open(hyperparameters_path, 'r+') as file_hyperparameter:
-           hyperparameters = json.load(file_hyperparameter)['best_parameters']
+           loaded_hyperparameters = json.load(file_hyperparameter)['best_parameters']
+       if args.skip_best_hyperparams:
+           hyperparameters = {'n_estimators': loaded_hyperparameters['n_estimators']}
+       else:
+           hyperparameters = loaded_hyperparameters
    else:
        hyperparameters = {}
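
The hunk above keeps the tuned n_estimators from params.json even when --skip_best_hyperparams is passed, so the wo_params runs use the same forest size as the with_params runs. A minimal standalone sketch of that behaviour, with made-up example values rather than the real contents of params.json:

# Hypothetical best_parameters loaded from params.json (values are illustrative only).
loaded_hyperparameters = {'n_estimators': 100, 'max_depth': 20, 'min_samples_leaf': 1}

skip_best_hyperparams = True
if skip_best_hyperparams:
    # Keep only the forest size so the run stays comparable with the tuned one;
    # all other hyperparameters fall back to the library defaults.
    hyperparameters = {'n_estimators': loaded_hyperparameters['n_estimators']}
else:
    hyperparameters = loaded_hyperparameters

print(hyperparameters)  # {'n_estimators': 100}
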
@@ -27,11 +27,11 @@
"job_number": -1,
"extraction_strategy": "none",
"extracted_forest_size": [
1,
3,
5,
6,
8
16,
33,
50,
66,
83
],
"experiment_id": 4
}
\ No newline at end of file
@@ -27,11 +27,11 @@
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
1,
3,
5,
6,
8
16,
33,
50,
66,
83
],
"experiment_id": 6
}
\ No newline at end of file
@@ -27,11 +27,11 @@
"job_number": -1,
"extraction_strategy": "random",
"extracted_forest_size": [
1,
3,
5,
6,
8
16,
33,
50,
66,
83
],
"experiment_id": 5
}
\ No newline at end of file
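
The updated extracted_forest_size lists are consistent with evenly spaced fractions of a common forest size. A small sketch of one way such values could be generated, assuming (this is an assumption, not stated in the diff) a shared forest_size of 100 and the extracted_forest_sizes_number=5 used in the compute_results command above:

forest_size = 100                    # assumed shared forest size of the with_params experiments
extracted_forest_sizes_number = 5    # matches the --extracted_forest_sizes_number example above

extracted_forest_sizes = [
    int(forest_size * i / (extracted_forest_sizes_number + 1))
    for i in range(1, extracted_forest_sizes_number + 1)
]
print(extracted_forest_sizes)  # [16, 33, 50, 66, 83]
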