diff --git a/multiview_platform/MonoMultiViewClassifiers/Monoview/Additions/QarBoostUtils.py b/multiview_platform/MonoMultiViewClassifiers/Monoview/Additions/QarBoostUtils.py
index 25b7044eca46e0442a87711d974ef6d2d2187323..86fc876202b90c8400978e0c35222249957d0523 100644
--- a/multiview_platform/MonoMultiViewClassifiers/Monoview/Additions/QarBoostUtils.py
+++ b/multiview_platform/MonoMultiViewClassifiers/Monoview/Additions/QarBoostUtils.py
@@ -20,6 +20,29 @@ class ColumnGenerationClassifierQar(BaseEstimator, ClassifierMixin, BaseBoost):
                  n_stumps_per_attribute=None, use_r=True,
                  plotted_metric=Metrics.zero_one_loss):
         super(ColumnGenerationClassifierQar, self).__init__()
+        r"""
+
+        Parameters
+        ----------
+        n_max_iterations : int
+            Maximum number of iterations for the boosting algorithm.
+        estimators_generator : object
+            Scikit-learn classifier object used to generate the hypotheses from the data.
+        random_state : np.random.RandomState or int
+            The random state, used to make the results reproducible.
+        self_complemented : bool
+            If True, the complement of each generated hypothesis is generated as well.
+        twice_the_same : bool
+            If True, the algorithm is allowed to select the same hypothesis twice during boosting.
+        c_bound_choice : bool
+            If True, the C-Bound is used to select the hypotheses; if False, the margin is the criterion.
+        n_stumps_per_attribute : int
+            The number of hypotheses generated per data attribute.
+        use_r : bool
+            If True, the edge is used to compute the performance of a voter; if False, the error is used instead.
+        plotted_metric : Metric module
+            The metric that is plotted for each boosting iteration.
+        """

         if type(random_state) is int:
             self.random_state = np.random.RandomState(random_state)
@@ -45,6 +68,9 @@ class ColumnGenerationClassifierQar(BaseEstimator, ClassifierMixin, BaseBoost):
         self.twice_the_same = params["twice_the_same"]
         self.c_bound_choice = params["c_bound_choice"]
         self.random_start = params["random_start"]
+        self.n_max_iterations = params["n_max_iterations"]
+        self.n_stumps = params["n_stumps_per_attribute"]
+        self.use_r = params["use_r"]

     def fit(self, X, y):
         start = time.time()
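
As a quick orientation for reviewers, below is a minimal usage sketch of the documented constructor parameters. It is not part of the patch: the import path is taken from the diff header, the keyword names mirror the docstring added above, and the toy data and chosen values are purely illustrative. `estimators_generator` and `plotted_metric` are left at their defaults.

import numpy as np

from multiview_platform.MonoMultiViewClassifiers.Monoview.Additions.QarBoostUtils import \
    ColumnGenerationClassifierQar

# Toy binary-classification data, for illustration only.
rng = np.random.RandomState(42)
X = rng.rand(100, 5)
y = rng.randint(0, 2, size=100)

# Hypothetical configuration: keyword names come from the docstring above,
# the values are arbitrary examples, not recommended settings.
clf = ColumnGenerationClassifierQar(
    n_max_iterations=10,       # stop boosting after 10 iterations
    random_state=42,           # an int is wrapped into np.random.RandomState internally
    self_complemented=True,    # also generate the complement of each hypothesis
    twice_the_same=False,      # do not re-select an already chosen hypothesis
    c_bound_choice=True,       # select hypotheses with the C-Bound criterion
    n_stumps_per_attribute=1,  # one stump generated per attribute
    use_r=True,                # score voters with the edge rather than the error
)
clf.fit(X, y)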