Commit 9abab367 authored by Baptiste Bauvin

Local ok

parent 3b59eabe
Pipeline #4360 failed
# The base configuration of the benchmark
log: True
name: ["digits",]
label: "_"
file_type: ".hdf5"
views:
pathf: "/home/baptiste/Documents/Datasets/Digits/"
nice: 0
random_state: 42
nb_cores: 1
full: True
debug: True
add_noise: False
noise_std: 0.0
res_dir: "../results/"
track_tracebacks: False
# All the classification-related configuration options
multiclass_method: "oneVersusOne"
split: 0.25
nb_folds: 2
nb_class:
classes:
type: ["monoview"]
algos_monoview: ["decision_tree" ]
algos_multiview: ["weighted_linear_early_fusion","weighted_linear_late_fusion"]
stats_iter: 3
metrics:
  accuracy_score: {}
  f1_score: {}
metric_princ: "accuracy_score"
hps_type: "None"
hps_args:
  n_iter: 10
  equivalent_draws: False
weighted_linear_early_fusion:
  view_weights: null
  monoview_classifier_name: "decision_tree"
  monoview_classifier_config:
    decision_tree:
      max_depth: 12
      criterion: "gini"
      splitter: "best"
weighted_linear_late_fusion:
  weights: null
  classifiers_names: "decision_tree"
  classifier_configs:
    decision_tree:
      max_depth: 3
      criterion: "gini"
      splitter: "best"

decision_tree:
  max_depth: 3
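
# For orientation, the two active multiview entries configure the fusion
# baselines: weighted_linear_early_fusion concatenates the (optionally
# weighted) views and trains a single monoview classifier on the stacked
# features, while weighted_linear_late_fusion trains one classifier per view
# and combines their outputs with the given weights. The sketch below
# illustrates that general idea only; the function names, shapes and defaults
# are assumptions, not the platform's implementation.

# Illustrative sketch of the two fusion schemes configured above.
import numpy as np
from sklearn.tree import DecisionTreeClassifier


def early_fusion_predict(views_train, y_train, views_test, view_weights=None):
    """Weighted early fusion: stack the (weighted) views column-wise and train
    a single monoview classifier on the concatenated features."""
    if view_weights is None:                      # null in the config -> uniform
        view_weights = np.ones(len(views_train)) / len(views_train)
    X_train = np.concatenate([w * v for w, v in zip(view_weights, views_train)], axis=1)
    X_test = np.concatenate([w * v for w, v in zip(view_weights, views_test)], axis=1)
    clf = DecisionTreeClassifier(max_depth=12, criterion="gini", splitter="best")
    return clf.fit(X_train, y_train).predict(X_test)


def late_fusion_predict(views_train, y_train, views_test, weights=None):
    """Weighted late fusion: train one classifier per view, then take a
    weighted average of the per-view class probabilities."""
    if weights is None:
        weights = np.ones(len(views_train)) / len(views_train)
    probas = [DecisionTreeClassifier(max_depth=3).fit(X_tr, y_train).predict_proba(X_te)
              for X_tr, X_te in zip(views_train, views_test)]
    fused = np.average(probas, axis=0, weights=weights)
    return np.argmax(fused, axis=1)               # index of the winning class
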
######################################
## The Monoview Classifier arguments #
######################################
mumbo:
  base_estimator__criterion: 'gini'
  base_estimator__max_depth: 3
  base_estimator__random_state: None
  base_estimator__splitter: 'best'
  best_view_mode: 'edge'
  base_estimator: 'decision_tree'
  n_estimators: 10
mucombo:
  base_estimator__criterion: 'gini'
  base_estimator__max_depth: 3
  base_estimator__random_state: None
  base_estimator__splitter: 'best'
  best_view_mode: 'edge'
  base_estimator: 'decision_tree'
  n_estimators: 10
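
# The mumbo and mucombo blocks pass their decision-tree settings through
# scikit-learn's double-underscore convention: a key such as
# base_estimator__max_depth is routed to the max_depth parameter of the nested
# base_estimator. The snippet below shows only that routing mechanism, using a
# stock Pipeline as a stand-in for the multimodal boosting estimators, which
# expose their settings through the same convention as the keys above suggest.

from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier

model = Pipeline([("base_estimator", DecisionTreeClassifier())])

# "<component>__<param>" is forwarded to the parameter of the named component.
model.set_params(base_estimator__criterion="gini",
                 base_estimator__max_depth=3,
                 base_estimator__splitter="best")

print(model.get_params()["base_estimator__max_depth"])  # 3
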
#
#random_forest:
#  n_estimators: [25]
#  max_depth: [3]
#  criterion: ["entropy"]
#
#svm_linear:
#  C: [1]
#
#svm_rbf:
#  C: [1]
#
#svm_poly:
#  C: [1]
#  degree: [2]
#
#adaboost:
#  n_estimators: [50]
#  base_estimator: ["DecisionTreeClassifier"]
#
#adaboost_pregen:
#  n_estimators: [50]
#  base_estimator: ["DecisionTreeClassifier"]
#  n_stumps: [1]
#
#adaboost_graalpy:
#  n_iterations: [50]
#  n_stumps: [1]
#
#
#decision_tree_pregen:
#  max_depth: [10]
#  criterion: ["gini"]
#  splitter: ["best"]
#  n_stumps: [1]
#
#sgd:
#  loss: ["hinge"]
#  penalty: [l2]
#  alpha: [0.0001]
#
#knn:
#  n_neighbors: [5]
#  weights: ["uniform"]
#  algorithm: ["auto"]
#
#scm:
#  model_type: ["conjunction"]
#  max_rules: [10]
#  p: [0.1]
#
#scm_pregen:
#  model_type: ["conjunction"]
#  max_rules: [10]
#  p: [0.1]
#  n_stumps: [1]
#
#cq_boost:
#  mu: [0.01]
#  epsilon: [1e-06]
#  n_max_iterations: [5]
#  n_stumps: [1]
#
#cg_desc:
#  n_max_iterations: [10]
#  n_stumps: [1]
#
#cb_boost:
#  n_max_iterations: [10]
#  n_stumps: [1]
#
#lasso:
#  alpha: [1]
#  max_iter: [2]
#
#gradient_boosting:
#  n_estimators: [2]
#
#
#######################################
## The Multiview Classifier arguments #
#######################################
#
#weighted_linear_early_fusion:
#  view_weights: [null]
#  monoview_classifier_name: ["decision_tree"]
#  monoview_classifier_config:
#    decision_tree:
#      max_depth: [1]
#      criterion: ["gini"]
#      splitter: ["best"]
#
#entropy_fusion:
#  classifiers_names: [["decision_tree"]]
#  classifier_configs:
#    decision_tree:
#      max_depth: [1]
#      criterion: ["gini"]
#      splitter: ["best"]
#
#disagree_fusion:
#  classifiers_names: [["decision_tree"]]
#  classifier_configs:
#    decision_tree:
#      max_depth: [1]
#      criterion: ["gini"]
#      splitter: ["best"]
#
#
#double_fault_fusion:
#  classifiers_names: [["decision_tree"]]
#  classifier_configs:
#    decision_tree:
#      max_depth: [1]
#      criterion: ["gini"]
#      splitter: ["best"]
#
#difficulty_fusion:
#  classifiers_names: [["decision_tree"]]
#  classifier_configs:
#    decision_tree:
#      max_depth: [1]
#      criterion: ["gini"]
#      splitter: ["best"]
#
#scm_late_fusion:
#  classifiers_names: [["decision_tree"]]
#  p: 0.1
#  max_rules: 10
#  model_type: 'conjunction'
#  classifier_configs:
#    decision_tree:
#      max_depth: [1]
#      criterion: ["gini"]
#      splitter: ["best"]
#
#majority_voting_fusion:
#  classifiers_names: [["decision_tree", "decision_tree", "decision_tree", ]]
#  classifier_configs:
#    decision_tree:
#      max_depth: [1]
#      criterion: ["gini"]
#      splitter: ["best"]
#
#bayesian_inference_fusion:
#  classifiers_names: [["decision_tree", "decision_tree", "decision_tree", ]]
#  classifier_configs:
#    decision_tree:
#      max_depth: [1]
#      criterion: ["gini"]
#      splitter: ["best"]
#
#weighted_linear_late_fusion:
#  classifiers_names: [["decision_tree", "decision_tree", "decision_tree", ]]
#  classifier_configs:
#    decision_tree:
#      max_depth: [1]
#      criterion: ["gini"]
#      splitter: ["best"]
#
#mumbo:
#  base_estimator: [null]
#  n_estimators: [10]
#  best_view_mode: ["edge"]
#
#lp_norm_mkl:
#  lmbda: [0.1]
#  n_loops: [50]
#  precision: [0.0001]
#  kernel: ["rbf"]
#  kernel_params:
#    gamma: [0.1]
#
#mvml:
#  reg_params: [[0,1]]
#  nystrom_param: [1]
#  learn_A: [1]
#  learn_w: [0]
#  n_loops: [6]
#  kernel_types: ["rbf_kernel"]
#  kernel_configs:
#    gamma: [0.1]
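
The configuration file ends here. As a reference for how such a file is consumed, here is a hedged sketch that simply loads it with PyYAML and picks out the sections the benchmark activates; the file name is an assumption, and the platform's own parser may also apply defaults, validation and command-line overrides.

# Illustrative only: a plain PyYAML load of the benchmark configuration.
import yaml

with open("config.yml") as config_file:          # assumed file name
    config = yaml.safe_load(config_file)

# The benchmark type and the algorithms to run are read from these keys...
print(config["type"])                            # ["monoview"]
monoview_algos = config["algos_monoview"]        # ["decision_tree"]
multiview_algos = config["algos_multiview"]

# ...and each algorithm's arguments sit under a top-level key of the same name.
for name in monoview_algos + multiview_algos:
    print(name, config.get(name, {}))

The commit also touches two places in the Python code, shown in the hunks below.
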
@@ -903,7 +903,7 @@ def exec_classif(arguments):
                                          args["full"],
                                          )
     args["name"] = datasetname
-    splits = execution.gen_splits(dataset_var.get_labels(),
+    splits = execution.gen_splits(dataset_var,
                                   args["split"],
                                   stats_iter_random_states)
...
@@ -213,7 +213,7 @@ def gen_splits(dataset_var, split_ratio, stats_iter_random_states):
         for ind in test_fold:
             if not example_ids[ind].startswith("new_"):
                 test_indices.append(indices[ind])
-        splits.append([train_indices, test_indices])
+        splits.append([train_indices, np.array(test_indices)])
     return splits
...
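
The two hunks belong together: exec_classif now passes the whole dataset object to gen_splits instead of only its labels (presumably so the function can consult the example ids it filters on), and gen_splits now wraps the filtered test indices in np.array so both halves of each split are NumPy arrays. Below is a hedged sketch of what such a helper can look like, assuming a dataset object that exposes get_labels() and example_ids; it is an illustration of the interface, not the project's code.

# Illustrative sketch of a gen_splits-like helper; not the project's code.
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit


def gen_splits(dataset, split_ratio, stats_iter_random_states):
    labels = dataset.get_labels()
    indices = np.arange(len(labels))
    splits = []
    for random_state in stats_iter_random_states:
        splitter = StratifiedShuffleSplit(n_splits=1, test_size=split_ratio,
                                          random_state=random_state)
        train_fold, test_fold = next(splitter.split(indices.reshape(-1, 1), labels))
        # Mirror the filter visible in the diff: keep only test examples whose
        # id does not start with "new_".
        test_indices = [indices[ind] for ind in test_fold
                        if not dataset.example_ids[ind].startswith("new_")]
        # Both halves are ndarrays, so callers can rely on array attributes
        # and fancy indexing without special-casing the test side.
        splits.append([indices[train_fold], np.array(test_indices)])
    return splits

Converting the test indices at the source keeps both entries of each split the same type, which is presumably the motivation for the np.array wrapper in the second hunk.
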