Commit 559d73c0 authored by Charly Lamothe

- Remove extracted_forest_sizes_number parameter from compute_results.py and retrieve the value instead;
- Add almost all remaining experiment config files of stages 1, 2 and 3;
- Add almost all remaining result plots of stages 1, 2 and 3;
- Add some temporary scripts to run the experiments of all stages.
parent 8de5e96a
1 merge request: !9 Resolve "Experiment pipeline"
Showing 737 additions and 10 deletions
@@ -9,6 +9,14 @@ from dotenv import find_dotenv, load_dotenv
import os

+def retreive_extracted_forest_sizes_number(models_dir, experiment_id):
+    experiment_id_path = models_dir + os.sep + str(experiment_id) # models/{experiment_id}
+    experiment_seed_root_path = experiment_id_path + os.sep + 'seeds' # models/{experiment_id}/seeds
+    seed = os.listdir(experiment_seed_root_path)[0]
+    experiment_seed_path = experiment_seed_root_path + os.sep + seed # models/{experiment_id}/seeds/{seed}
+    extracted_forest_sizes_root_path = experiment_seed_path + os.sep + 'extracted_forest_sizes'
+    return len(os.listdir(extracted_forest_sizes_root_path))
+
def extract_scores_across_seeds_and_extracted_forest_sizes(models_dir, results_dir, experiment_id):
    experiment_id_path = models_dir + os.sep + str(experiment_id) # models/{experiment_id}
    experiment_seed_root_path = experiment_id_path + os.sep + 'seeds' # models/{experiment_id}/seeds
@@ -120,7 +128,6 @@ if __name__ == "__main__":
        'stage=2: {{no_normalization}} {{normalize_D}} {{normalize_weights}} {{normalize_D_and_weights}}' + \
        'stage=3: {{train-dev_subset}} {{train-dev_train-dev_subset}} {{train-train-dev_subset}}')
    parser.add_argument('--dataset_name', nargs='?', type=str, required=True, help='Specify the dataset name. TODO: read it from models dir directly.')
-    parser.add_argument('--extracted_forest_sizes_number', nargs='?', type=int, required=True, help='Specify the number of extracted forest sizes. TODO: read it from models dir directly.')
    parser.add_argument('--results_dir', nargs='?', type=str, default=DEFAULT_RESULTS_DIR, help='The output directory of the results.')
    parser.add_argument('--models_dir', nargs='?', type=str, default=DEFAULT_MODELS_DIR, help='The output directory of the trained models.')
    args = parser.parse_args()
@@ -130,6 +137,9 @@ if __name__ == "__main__":
    logger = LoggerFactory.create(LOG_PATH, os.path.basename(__file__))
+    logger.info('Compute results with stage:{} - experiment_ids:{} - dataset_name:{} - results_dir:{} - models_dir:{}'.format(
+        args.stage, args.experiment_ids, args.dataset_name, args.results_dir, args.models_dir))
+
    # Create recursively the results dir tree
    pathlib.Path(args.results_dir).mkdir(parents=True, exist_ok=True)
@@ -137,6 +147,9 @@ if __name__ == "__main__":
        if len(args.experiment_ids) != 6:
            raise ValueError('In the case of stage 1, the number of specified experiment ids must be 6.')
+        # Retrieve the number of extracted forest sizes so that the base forest axis is as long as necessary
+        extracted_forest_sizes_number = retreive_extracted_forest_sizes_number(args.models_dir, args.experiment_ids[1])
        # Experiments that used the best hyperparameters found for this dataset
        # base_with_params
@@ -144,7 +157,7 @@ if __name__ == "__main__":
        base_with_params_train_scores, base_with_params_dev_scores, base_with_params_test_scores, \
            base_with_params_experiment_score_metric = \
            extract_scores_across_seeds_and_forest_size(args.models_dir, args.results_dir, args.experiment_ids[0],
-                args.extracted_forest_sizes_number)
+                extracted_forest_sizes_number)
        # random_with_params
        logger.info('Loading random_with_params experiment scores...')
        random_with_params_train_scores, random_with_params_dev_scores, random_with_params_test_scores, \
@@ -163,7 +176,7 @@ if __name__ == "__main__":
        base_wo_params_train_scores, base_wo_params_dev_scores, base_wo_params_test_scores, \
            base_wo_params_experiment_score_metric = extract_scores_across_seeds_and_forest_size(
            args.models_dir, args.results_dir, args.experiment_ids[3],
-            args.extracted_forest_sizes_number)
+            extracted_forest_sizes_number)
        # random_wo_params
        logger.info('Loading random_wo_params experiment scores...')
        random_wo_params_train_scores, random_wo_params_dev_scores, random_wo_params_test_scores, \
@@ -292,6 +305,16 @@ if __name__ == "__main__":
        pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)
        Plotter.plot_stage2_losses(
+            file_path=output_path + os.sep + 'losses.png',
+            all_experiment_scores=[train_dev_subset_test_scores, train_dev_train_dev_subset_test_scores,
+                train_train_dev_subset_test_scores],
+            all_labels=['train,dev', 'train+dev,train+dev', 'train,train+dev'],
+            x_value=extracted_forest_sizes,
+            xlabel='Number of trees extracted',
+            ylabel=experiments_score_metric,
+            title='Loss values of {}\nusing different training subsets'.format(args.dataset_name))
+        """Plotter.plot_stage2_losses(
            file_path=output_path + os.sep + 'losses.png',
            all_experiment_scores=[train_dev_subset_train_scores, train_train_dev_subset_train_scores,
                train_train_dev_subset_train_scores, train_dev_subset_dev_scores, train_dev_train_dev_subset_dev_scores,
@@ -303,10 +326,12 @@ if __name__ == "__main__":
            x_value=extracted_forest_sizes,
            xlabel='Number of trees extracted',
            ylabel=experiments_score_metric,
-            title='Loss values of {}\nusing different training subsets'.format(args.dataset_name))
+            title='Loss values of {}\nusing different training subsets'.format(args.dataset_name))"""
    else:
        raise ValueError('This stage number is not supported yet, but it will be!')
+    logger.info('Done.')
""" """
TODO: TODO:
For each dataset: For each dataset:
......
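The helper added above derives the number of extracted forest sizes by counting the subdirectories of models/{experiment_id}/seeds/{seed}/extracted_forest_sizes for an arbitrary seed, which is what makes the removed --extracted_forest_sizes_number flag redundant. A minimal standalone sketch of the same directory walk, assuming only the layout documented in the comments (the function name and the error check are illustrative, not part of the commit):

import os

def count_extracted_forest_sizes(models_dir, experiment_id):
    # models/{experiment_id}/seeds holds one directory per seed
    seeds_root = os.path.join(models_dir, str(experiment_id), 'seeds')
    seeds = os.listdir(seeds_root)
    if not seeds:
        raise FileNotFoundError('no seed directory under ' + seeds_root)
    # Any seed works: each seed holds one subdirectory per extracted forest size
    sizes_root = os.path.join(seeds_root, seeds[0], 'extracted_forest_sizes')
    return len(os.listdir(sizes_root))

For the configuration files added in this commit, this count equals extracted_forest_size_samples, i.e. 5.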
@@ -105,23 +105,23 @@ Command lines example for stage 1:
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=none --save_experiment_configuration 1 none_with_params --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=random --save_experiment_configuration 1 random_with_params --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 1 omp_with_params --extracted_forest_size_stop=0.05
-python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=none --skip_best_hyperparams --save_experiment_configuration 1 none_wo_params --forest_size=1000 --extracted_forest_size_stop=0.05
+python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=none --skip_best_hyperparams --save_experiment_configuration 1 none_wo_params --extracted_forest_size_stop=0.05
-python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=random --skip_best_hyperparams --save_experiment_configuration 1 random_wo_params --forest_size=1000 --extracted_forest_size_stop=0.05
+python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --extraction_strategy=random --skip_best_hyperparams --save_experiment_configuration 1 random_wo_params --extracted_forest_size_stop=0.05
-python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --skip_best_hyperparams --save_experiment_configuration 1 omp_wo_params --forest_size=1000 --extracted_forest_size_stop=0.05
+python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --skip_best_hyperparams --save_experiment_configuration 1 omp_wo_params --extracted_forest_size_stop=0.05
-python code/compute_results.py --stage 1 --experiment_ids 1 2 3 4 5 6 --dataset_name=california_housing --extracted_forest_sizes_number=5
+python code/compute_results.py --stage 1 --experiment_ids 1 2 3 4 5 6 --dataset_name=california_housing
Command lines example for stage 2:
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 2 no_normalization --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 2 normalize_D --normalize_D --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 2 normalize_weights --normalize_weights --extracted_forest_size_stop=0.05
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 2 normalize_D_and_weights --normalize_D --normalize_weights --extracted_forest_size_stop=0.05
-python code/compute_results.py --stage 2 --experiment_ids 7 8 9 10 --dataset_name=california_housing --extracted_forest_sizes_number=5
+python code/compute_results.py --stage 2 --experiment_ids 7 8 9 10 --dataset_name=california_housing
Command lines example for stage 3:
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 3 train-dev_subset --extracted_forest_size_stop=0.05 --subsets_used train,dev
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 3 train-dev_train-dev_subset --extracted_forest_size_stop=0.05 --subsets_used train+dev,train+dev
python code/train.py --dataset_name=california_housing --seeds 1 2 3 4 5 --save_experiment_configuration 3 train-train-dev_subset --extracted_forest_size_stop=0.05 --subsets_used train,train+dev
-python code/compute_results.py --stage 3 --experiment_ids 11 12 13 --dataset_name=california_housing --extracted_forest_sizes_number=5
+python code/compute_results.py --stage 3 --experiment_ids 11 12 13 --dataset_name=california_housing
"""
if __name__ == "__main__":
    load_dotenv(find_dotenv('.env'))
...
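The commit message also mentions temporary scripts that run the experiments of all stages. A hedged sketch of what such a stage-1 runner could look like, built only from the command lines documented above (the runner itself and the subprocess loop are assumptions, not the committed scripts):

import subprocess

BASE = ('python code/train.py --dataset_name=california_housing '
        '--seeds 1 2 3 4 5 --extracted_forest_size_stop=0.05').split()

STAGE1_VARIANTS = [
    ['--extraction_strategy=none', '--save_experiment_configuration', '1', 'none_with_params'],
    ['--extraction_strategy=random', '--save_experiment_configuration', '1', 'random_with_params'],
    ['--save_experiment_configuration', '1', 'omp_with_params'],
    ['--extraction_strategy=none', '--skip_best_hyperparams', '--save_experiment_configuration', '1', 'none_wo_params'],
    ['--extraction_strategy=random', '--skip_best_hyperparams', '--save_experiment_configuration', '1', 'random_wo_params'],
    ['--skip_best_hyperparams', '--save_experiment_configuration', '1', 'omp_wo_params'],
]

# One train.py run per stage-1 experiment, in the order the ids are consumed
for variant in STAGE1_VARIANTS:
    subprocess.run(BASE + variant, check=True)

# No --extracted_forest_sizes_number anymore: compute_results.py now reads the
# count from the models directory itself
subprocess.run('python code/compute_results.py --stage 1 '
               '--experiment_ids 1 2 3 4 5 6 '
               '--dataset_name=california_housing'.split(), check=True)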
{
"experiment_id": 1,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage1",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"1",
"none_with_params"
],
"job_number": -1,
"extraction_strategy": "none",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
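Each of the configuration files added in this commit (the one above and those below) records the full train.py argument set for one experiment, including the resolved extracted_forest_size list. A hedged sketch of inspecting one of them, assuming a hypothetical on-disk path (the assertion just restates the relationship visible in the file):

import json

# Hypothetical path; the actual location is determined by experiment_configuration_path
with open('experiments/1/none_with_params.json') as f:
    config = json.load(f)

# extracted_forest_size_samples sizes were extracted per seed, which is exactly
# the count compute_results.py now recovers from the models directory
assert len(config['extracted_forest_size']) == config['extracted_forest_size_samples']
print(config['extraction_strategy'], config['extracted_forest_size'])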
{
"experiment_id": 4,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage1",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": true,
"save_experiment_configuration": [
"1",
"none_wo_params"
],
"job_number": -1,
"extraction_strategy": "none",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 3,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage1",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"1",
"omp_with_params"
],
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 6,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage1",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": true,
"save_experiment_configuration": [
"1",
"omp_wo_params"
],
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 2,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage1",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"1",
"random_with_params"
],
"job_number": -1,
"extraction_strategy": "random",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 5,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage1",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": true,
"save_experiment_configuration": [
"1",
"random_wo_params"
],
"job_number": -1,
"extraction_strategy": "random",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 1,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage2",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"2",
"no_normalization"
],
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 2,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": true,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage2",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"2",
"normalize_D"
],
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 4,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": true,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage2",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": true,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"2",
"normalize_D_and_weights"
],
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 3,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage2",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": true,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"2",
"normalize_weights"
],
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 1,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage3",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"3",
"train-dev_subset"
],
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 2,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage3",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train+dev,train+dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"3",
"train-dev_train-dev_subset"
],
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 3,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "20newsgroups_vectorized",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/20newsgroups_vectorized/stage3",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,train+dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"3",
"train-train-dev_subset"
],
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
7,
13,
20,
27,
34
]
}
\ No newline at end of file
{
"experiment_id": 1,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "boston",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/boston/stage1",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"1",
"none_with_params"
],
"job_number": -1,
"extraction_strategy": "none",
"extracted_forest_size": [
8,
17,
25,
33,
42
]
}
\ No newline at end of file
{
"experiment_id": 4,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "boston",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/boston/stage1",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": true,
"save_experiment_configuration": [
"1",
"none_wo_params"
],
"job_number": -1,
"extraction_strategy": "none",
"extracted_forest_size": [
8,
17,
25,
33,
42
]
}
\ No newline at end of file
{
"experiment_id": 3,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "boston",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/boston/stage1",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"1",
"omp_with_params"
],
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
8,
17,
25,
33,
42
]
}
\ No newline at end of file
{
"experiment_id": 6,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "boston",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/boston/stage1",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": true,
"save_experiment_configuration": [
"1",
"omp_wo_params"
],
"job_number": -1,
"extraction_strategy": "omp",
"extracted_forest_size": [
8,
17,
25,
33,
42
]
}
\ No newline at end of file
{
"experiment_id": 2,
"experiment_configuration": null,
"experiment_configuration_path": "experiments",
"dataset_name": "boston",
"normalize_D": false,
"dataset_normalizer": "standard",
"forest_size": null,
"extracted_forest_size_samples": 5,
"extracted_forest_size_stop": 0.05,
"models_dir": "models/boston/stage1",
"dev_size": 0.2,
"test_size": 0.2,
"random_seed_number": 1,
"seeds": [
1,
2,
3,
4,
5
],
"subsets_used": "train,dev",
"normalize_weights": false,
"verbose": false,
"skip_best_hyperparams": false,
"save_experiment_configuration": [
"1",
"random_with_params"
],
"job_number": -1,
"extraction_strategy": "random",
"extracted_forest_size": [
8,
17,
25,
33,
42
]
}
\ No newline at end of file