diff --git a/fedot/core/operations/evaluation/operation_implementations/models/boostings_implementations.py b/fedot/core/operations/evaluation/operation_implementations/models/boostings_implementations.py
index 589c72c29e..cfd6a37cbd 100644
--- a/fedot/core/operations/evaluation/operation_implementations/models/boostings_implementations.py
+++ b/fedot/core/operations/evaluation/operation_implementations/models/boostings_implementations.py
@@ -91,7 +91,12 @@ def plot_feature_importance(self, importance_type='weight'):
     @staticmethod
     def convert_to_dataframe(data: Optional[InputData], identify_cats: bool):
         dataframe = pd.DataFrame(data=data.features)
-        dataframe['target'] = np.ravel(data.target)
+        if data.target is not None:
+            dataframe['target'] = np.ravel(data.target)
+        else:
+            # TODO: temp workaround in case data.target is set to None intentionally
+            # for test.integration.models.test_model.check_predict_correct
+            dataframe['target'] = np.zeros(len(data.features))
 
         if identify_cats and data.categorical_idx is not None:
             for col in dataframe.columns[data.categorical_idx]:
@@ -231,7 +236,12 @@ def set_eval_metric(n_classes):
     @staticmethod
     def convert_to_dataframe(data: Optional[InputData], identify_cats: bool):
         dataframe = pd.DataFrame(data=data.features, columns=data.features_names)
-        dataframe['target'] = np.ravel(data.target)
+        if data.target is not None:
+            dataframe['target'] = np.ravel(data.target)
+        else:
+            # TODO: temp workaround in case data.target is set to None intentionally
+            # for test.integration.models.test_model.check_predict_correct
+            dataframe['target'] = np.zeros(len(data.features))
 
         if identify_cats and data.categorical_idx is not None:
             for col in dataframe.columns[data.categorical_idx]:
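
For illustration, here is a minimal, self-contained sketch of the None-target guard the patch adds. It is not the FEDOT implementation: `FakeInputData` and `convert_to_dataframe_sketch` are hypothetical stand-ins for `InputData` and the modified method, kept only to show how the zero-filled fallback keeps the frame shape consistent when no target is supplied (e.g. in a predict-only call).

```python
# A standalone sketch of the guard above; only numpy/pandas are real dependencies,
# all other names are hypothetical and not part of the patch.
from dataclasses import dataclass
from typing import Optional

import numpy as np
import pandas as pd


@dataclass
class FakeInputData:
    """Simplified stand-in for FEDOT's InputData."""
    features: np.ndarray
    target: Optional[np.ndarray] = None


def convert_to_dataframe_sketch(data: FakeInputData) -> pd.DataFrame:
    dataframe = pd.DataFrame(data=data.features)
    if data.target is not None:
        # Normal path: flatten the target into a single column.
        dataframe['target'] = np.ravel(data.target)
    else:
        # Fallback mirroring the workaround: a zero-filled target column keeps
        # the frame layout intact when target is intentionally absent.
        dataframe['target'] = np.zeros(len(data.features))
    return dataframe


if __name__ == '__main__':
    X = np.arange(6).reshape(3, 2)
    print(convert_to_dataframe_sketch(FakeInputData(features=X, target=np.array([1, 0, 1]))))
    print(convert_to_dataframe_sketch(FakeInputData(features=X, target=None)))
```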