diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c0f017978..f56111f3ba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ All notable changes to this project will be documented in this file. ### Changed +- Include additional Vulnerability Detector E2E tests ([#5287](https://github.com/wazuh/wazuh-qa/pull/5287)) \- (Framework + Tests) - Change Vulnerability Detection feed updated waiter ([#5227](https://github.com/wazuh/wazuh-qa/pull/5227)) \- (Tests) - Replace timestamp filter with vulnerabilities detected_at field.([#5266](https://github.com/wazuh/wazuh-qa/pull/5266)) \- (Framework + Tests) - Changes macOS packages with new ones that generate vulnerabilities ([#5174](https://github.com/wazuh/wazuh-qa/pull/5174)) \- (Tests) diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/__init__.py b/deps/wazuh_testing/wazuh_testing/end_to_end/__init__.py index c107f5bedd..b7bd7bb0cd 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/__init__.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/__init__.py @@ -1,15 +1,21 @@ -# Copyright (C) 2015-2022, Wazuh Inc. +# Copyright (C) 2015, Wazuh Inc # Created by Wazuh, Inc. . # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 +import json +import logging import os import requests + +from dataclasses import dataclass from http import HTTPStatus from tempfile import gettempdir +from typing import Any, Callable, List from wazuh_testing.tools.utils import retry fetched_alerts_json_path = os.path.join(gettempdir(), 'alerts.json') +VD_E2E_TIMEOUT_SYSCOLLECTOR_SCAN = 130 base_path = { 'linux': '/var/ossec', @@ -32,22 +38,24 @@ def get_alert_indexer_api(query, credentials, ip_address, index='wazuh-alerts-4.x-*'): """Get an alert from the wazuh-indexer API - Make a request to the wazuh-indexer API to get the last indexed alert that matches the values passed in - must_match. 
+ Make a request to the wazuh-indexer API to get the last indexed alert that matches the values passed in + query. - Args: - ip_address (str): wazuh-indexer IP address. - index (str): Index in which to search for the alert. - query (dict): Query to send to the API. - credentials(dict): wazuh-indexer credentials. + Args: + ip_address (str): wazuh-indexer IP address. + index (str): Index in which to search for the alert. + query (dict): Query to send to the API. + credentials(dict): wazuh-indexer credentials. - Returns: - `obj`(map): Search results + Returns: + `obj`(map): Search results """ url = f"https://{ip_address}:9200/{index}/_search?" - response = requests.get(url=url, params={'pretty': 'true'}, json=query, verify=False, - auth=requests.auth.HTTPBasicAuth(credentials['user'], credentials['password'])) + response = requests.get(url=url, params={'pretty': 'true'}, json=query, + verify=False, + auth=requests.auth.HTTPBasicAuth(credentials['user'], + credentials['password'])) if '"hits" : [ ]' in response.text: raise Exception('Alert not indexed') @@ -60,19 +68,20 @@ def get_alert_indexer_api(query, credentials, ip_address, index='wazuh-alerts-4. def delete_index_api(credentials, ip_address, index='wazuh-alerts-4.x-*'): """Delete indices from wazuh-indexer using its API. - Make a request to the wazuh-indexer API to delete indices that match a given name. + Make a request to the wazuh-indexer API to delete indices that match a given name. - Args: - ip_address (str): wazuh-indexer IP address. - index (str): Name of the index to be deleted. - credentials(dict): wazuh-indexer credentials. + Args: + ip_address (str): wazuh-indexer IP address. + index (str): Name of the index to be deleted. + credentials(dict): wazuh-indexer credentials. 
- Returns: - obj(class): `Response ` object - obj(class): `NoneType` object + Returns: + obj(class): `Response ` object + obj(class): `NoneType` object """ url = f"https://{ip_address}:9200/" - authorization = requests.auth.HTTPBasicAuth(credentials['user'], credentials['password']) + authorization = requests.auth.HTTPBasicAuth(credentials['user'], + credentials['password']) response = requests.delete(url=url+index, params={'pretty': 'true'}, verify=False, auth=authorization) @@ -85,12 +94,12 @@ def delete_index_api(credentials, ip_address, index='wazuh-alerts-4.x-*'): def make_query(must_match): """Create a query according to the values passed in must_match. - Args: - must_match (list): Values to be matched with the indexed alert. + Args: + must_match (list): Values to be matched with the indexed alert. - Returns: - dict: Fully formed query. - """ + Returns: + dict: Fully formed query. + """ query = { "query": { "bool": { @@ -108,3 +117,274 @@ def make_query(must_match): } return query + + +@dataclass +class Evidence: + """A data class representing evidence. + + Attributes: + name (str): The name of the evidence. + value (Any): The value of the evidence. + debug (bool, optional): Indicates whether the evidence is for debugging, for verbose evidences. + Defaults to False. + """ + name: str + value: Any + debug: bool = False + + def collect_evidence(self, evidences_directory: str): + """Collects evidence and stores it in the specified directory. + + Args: + evidences_directory (str): The directory where evidence files will be stored. + """ + try: + with open(os.path.join(evidences_directory, self.name), 'w') as evidence_file: + self._write_to_file(evidence_file) + except Exception as e: + self._log_error(e) + + def _write_to_file(self, evidence_file): + """Writes evidence to a file. + + Args: + evidence_file: File object to write evidence to. 
+ """ + if isinstance(self.value, (dict, list)): + json.dump(self.value, evidence_file, indent=4) + else: + evidence_file.write(str(self.value)) + + def _log_error(self, e): + """Logs error occurred while writing evidence. + + Args: + e: The exception that occurred. + """ + logging.error(f"Error while writing evidence {self.name}: {e}") + + def dict(self): + """Returns the evidence as a dictionary. + + Returns: + dict: A dictionary representation of the evidence. + """ + return {self.name: self.value} + + +class Check: + """A class representing a check to be performed, including validation and reporting. + + Attributes: + name (str): The name of the check. + assert_function (Callable): The function used for assertion. + expected_evidences (List[str] | None): List of expected evidence names to perform the validation. + Default is None. + result: The result of the check. + evidences: List of collected evidence objects. + """ + def __init__(self, name: str, assert_function: Callable, + expected_evidences: List[str] = None): + """Initializes a check with the given name, assertion function, and expected evidences. + + Args: + name (str): The name of the check. + assert_function (Callable): The function used for assertion. + expected_evidences (List[str] | None, optional): List of expected evidence names. Defaults to None. + """ + self.name = name + self.result = None + self.assert_function = assert_function + self.expected_evidences = expected_evidences if expected_evidences else [] + self.evidences = [] + + def __str__(self) -> str: + """Returns a string representation of the check. + + Returns: + str: A string containing the check's name and result. + """ + return self.report_check() + + def validate(self, evidences: List[Evidence] = None) -> bool: + """Validates the check using the provided evidences. + + Args: + evidences (List[Evidence] | None, optional): List of evidence objects. Defaults to None. + + Returns: + bool: True if validation succeeds, False otherwise. 
+ + Raises: + ValueError: If provided evidences do not contains the expected ones. + """ + + evidences = [] if not evidences else evidences + + provided_evidences_names = [evidence.name for evidence in evidences] + provided_evidences_expected = [evidence for evidence in evidences + if evidence.name in self.expected_evidences] + + if len(self.expected_evidences) != len(provided_evidences_expected): + raise ValueError('Evidences should match the expected ones.\n' + + f"Expected evidences: {self.expected_evidences}." + f"Evidences found: {provided_evidences_names}") + + self.result = self.assert_function(*[evidence.value for evidence in provided_evidences_expected]) + self.evidences = evidences + + logging.error(f"Marked check {self.name} result to {self.result} with evidences {provided_evidences_names}") + + return self.result + + def get_result(self): + """Gets the result of the check. + + Returns: + Any: The result of the check. + + Raises: + ValueError: If the check has not been executed yet. + """ + if self.result is None: + raise ValueError(f"Check {self.name} has not been executed yet") + + return self.result + + def report_check(self): + """Generates a report message for the check. + + Returns: + str: A report message indicating whether the check succeeded or failed. + """ + message = f"Check {self.name} " + message += f"failed. Evidences ({self.expected_evidences}) " + \ + "can be found in the report." if not self.get_result() else "succeeded" + message += '\n' + + return message + + def collect_evidences(self, evidences_directory: str, collect_debug_evidences: bool = False): + """Collects evidences for the check. + + Args: + evidences_directory (str): The directory where evidence files will be stored. + collect_debug_evidences (bool, optional): If True, collects debug evidence. Defaults to False. 
+ """ + for evidence in self.evidences: + if evidence.debug and not collect_debug_evidences: + continue + + evidence.collect_evidence(evidences_directory) + + +class TestResult: + """A data class representing a test result. + + Attributes: + test_name (str): The name of the test. + checks (List[Check]): List of checks of the test, default is an empty list. + """ + def __init__(self, test_name: str, checks: List[Check] = None): + """Initializes a test suite with the given name and checks. + + Args: + test_name (str): The name of the test suite. + checks (List[Check] | None, optional): List of checks. Defaults to None. + """ + self.test_name = test_name + self.checks = checks if checks else [] + + def __str__(self) -> str: + """Returns a string representation of the test suite. + + Returns: + str: A string containing the test suite's name and report. + """ + return self.report() + + def add_check(self, check: Check) -> None: + """Adds a check to the test suite. + + Args: + check (Check): The check to be added. + """ + self.checks.append(check) + + def get_test_result(self) -> bool: + """Gets the result of the test suite. + + Returns: + bool: True if all checks passed, False otherwise. + """ + return all([check.result for check in self.checks]) + + def collect_evidences(self, evidences_directory: str, + collect_verbose_evidences: bool = False, + collect_evidences_for_passed_checks: bool = False) -> None: + """Collects evidences for the checks in the test suite. + + Args: + evidences_directory (str): The directory where evidence files will be stored. + collect_verbose_evidences (bool, optional): If True, collects verbose evidences. Defaults to False. + collect_evidences_for_passed_checks (bool, optional): If True, collects evidences for passed checks as well. + Defaults to False. 
+ """ + for check in self.checks: + if check.get_result() and not collect_evidences_for_passed_checks: + continue + check.collect_evidences(evidences_directory, collect_verbose_evidences) + + def report(self) -> str: + """Generates a report message for the test suite. + + Returns: + str: A report message indicating whether the test suite succeeded or failed, + along with individual check reports. + """ + message = f"\nTest {self.test_name} " + message += "failed\n\n" if not self.get_test_result() else "succeeded:\n\n-----\n" + + if not self.get_test_result(): + for check in self.checks: + message += check.report_check() + + message += "-----\n" + + return message + + def validate_check(self, check_name: str, evidences: List[Evidence]) -> bool: + """Validates a specific check in the test suite. + + Args: + check_name (str): The name of the check to validate. + evidences (List[Evidence]): List of evidence objects. + + Returns: + bool: True if validation succeeds, False otherwise. + + Raises: + ValueError: If the check with the given name is not found in the test suite. + """ + check = self.get_check(check_name) + + return check.validate(evidences) + + def get_check(self, check_name: str) -> Check: + """ Retrieves a specific check from the test suite. + + Args: + check_name (str): The name of the check to retrieve. + + Returns: + Check: The check object. + + Raises: + ValueError: If the check with the given name is not found in the test suite. 
+ """ + for check in self.checks: + if check.name == check_name: + return check + else: + raise ValueError(f"Check {check_name} not found in test {self.test_name}") diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/check_validators.py b/deps/wazuh_testing/wazuh_testing/end_to_end/check_validators.py new file mode 100644 index 0000000000..aa33e5761f --- /dev/null +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/check_validators.py @@ -0,0 +1,164 @@ +import logging +from typing import Dict + + +def get_failed_operation_hosts(global_operation_results: dict) -> list: + failed_hosts = [] + for host, operation_result in global_operation_results.items(): + if not operation_result: + logging.critical(f"Operation on {host} failed") + failed_hosts.append(host) + + return failed_hosts + + +def validate_operation_results(global_operation_results: dict) -> bool: + return len(get_failed_operation_hosts(global_operation_results)) == 0 + + +def compare_expected_found_vulnerabilities(vulnerabilities, expected_vulnerabilities): + result = True + + vulnerabilities_not_found = {} + vulnerabilities_unexpected = {} + + failed_agents = [] + + for agent, expected_vulns in expected_vulnerabilities.items(): + for vulnerability in expected_vulns: + if vulnerability not in vulnerabilities.get(agent, []): + logging.critical(f"Vulnerability not found for {agent}: {vulnerability}") + if agent not in vulnerabilities_not_found: + vulnerabilities_not_found[agent] = [] + failed_agents.append(agent) + + result = False + vulnerabilities_not_found[agent].append(vulnerability) + + for agent, agent_vulnerabilities in vulnerabilities.items(): + for vulnerability in agent_vulnerabilities: + if vulnerability not in expected_vulnerabilities.get(agent, []): + logging.critical(f"Vulnerability unexpected found for {agent}: {vulnerability}") + if agent not in vulnerabilities_unexpected: + vulnerabilities_unexpected[agent] = [] + failed_agents.append(agent) + + result = False + 
vulnerabilities_unexpected[agent].append(vulnerability) + + if not result: + logging.critical(f"Vulnerabilities not found: {vulnerabilities_not_found}") + logging.critical(f"Vulnerabilities unexpected: {vulnerabilities_unexpected}") + + return { + 'vulnerabilities_not_found': vulnerabilities_not_found, + 'vulnerabilities_unexpected': vulnerabilities_unexpected, + 'failed_agents': failed_agents, + 'result': result + } + + +def expected_vulnerabilities_index(vulnerabilities, expected_vulnerabilities): + expected_found_comparision = compare_expected_found_vulnerabilities(vulnerabilities, + expected_vulnerabilities) + + return expected_found_comparision['result'] + + +def get_duplicated_elements(list_of_elements) -> list: + seen = set() + duplicated = set() + for item in list_of_elements: + if item in seen: + duplicated.add(item) + seen.add(item) + + return list(duplicated) + + +def get_duplicated_vulnerabilities(vulnerabilities: Dict) -> list: + global_duplicated_vulnerabilities = [] + + for agent, agent_vulnerabilities in vulnerabilities.items(): + duplicated_vulnerabilities = get_duplicated_elements(agent_vulnerabilities) + if duplicated_vulnerabilities: + global_duplicated_vulnerabilities.append({agent: duplicated_vulnerabilities}) + + return global_duplicated_vulnerabilities + + +def compare_expected_found_vulnerabilities_alerts(vulnerabilities, expected_vulnerabilities): + result = True + vulnerabilities_affected_not_found = {} + vulnerabilities_mitigated_not_found = {} + + failed_agents = [] + + vulnerabilities_present = vulnerabilities.get('affected', {}) + vulnerabilities_absent = vulnerabilities.get('mitigated', {}) + expected_vulnerabilities_affected = expected_vulnerabilities.get('affected', {}) + expected_vulnerabilities_mitigated = expected_vulnerabilities.get('mitigated', {}) + + for agent, vulnerabilities in expected_vulnerabilities_affected.items(): + for vulnerability in vulnerabilities: + if vulnerability not in vulnerabilities_present.get(agent): + 
if agent not in vulnerabilities_affected_not_found: + vulnerabilities_affected_not_found[agent] = [] + failed_agents.append(agent) + + vulnerabilities_affected_not_found[agent].append(vulnerability) + result = False + + for agent, vulnerabilities in expected_vulnerabilities_mitigated.items(): + for vulnerability in vulnerabilities: + if vulnerability not in vulnerabilities_absent.get(agent): + if agent not in vulnerabilities_mitigated_not_found: + vulnerabilities_mitigated_not_found[agent] = [] + failed_agents.append(agent) + + vulnerabilities_mitigated_not_found[agent].append(vulnerability) + result = False + + if not result: + logging.critical(f"Vulnerabilities affected not found: {vulnerabilities_affected_not_found}") + logging.critical(f"Vulnerabilities mitigated not found: {vulnerabilities_mitigated_not_found}") + + return { + 'vulnerabilities_affected_not_found': vulnerabilities_affected_not_found, + 'vulnerabilities_mitigated_not_found': vulnerabilities_mitigated_not_found, + 'failed_agents': failed_agents, + 'result': result + } + + +def expected_vulnerability_alerts(vulnerabilities, expected_vulnerabilities): + + expected_found_comparision = compare_expected_found_vulnerabilities_alerts(vulnerabilities, + expected_vulnerabilities) + return expected_found_comparision['result'] + + +def empty_dict(dictionary: dict): + result = True + for key, value in dictionary.items(): + if value: + logging.critical(f"{key} is not empty: {value}") + result = False + + return result + + +equals = lambda x, y: x == y + + +def equals_but_not_empty(x, y): + return equals(x, y) and not empty(x) + + +empty = lambda x: len(x) == 0 + +no_errors = lambda x: all( + not any(x[host][level] for level in ["ERROR", "CRITICAL", "WARNING"]) + for host in x +) + diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/configuration.py b/deps/wazuh_testing/wazuh_testing/end_to_end/configuration.py index ce3c0a980c..926a959555 100644 --- 
a/deps/wazuh_testing/wazuh_testing/end_to_end/configuration.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/configuration.py @@ -16,14 +16,15 @@ Created by Wazuh, Inc. . This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 """ -import xml.dom.minidom +import ast import logging - +import xml.dom.minidom from multiprocessing.pool import ThreadPool from typing import Dict, List from wazuh_testing.end_to_end import configuration_filepath_os -from wazuh_testing.tools.configuration import set_section_wazuh_conf +from wazuh_testing.tools.configuration import (load_configuration_template, + set_section_wazuh_conf) from wazuh_testing.tools.system import HostManager @@ -223,3 +224,90 @@ def save_indexer_credentials_into_keystore(host_manager): for manager in host_manager.get_group_hosts('manager'): host_manager.run_command(manager, f"{keystore_path} -f indexer -k username -v {indexer_user}") host_manager.run_command(manager, f"{keystore_path} -f indexer -k password -v {indexer_password}") + + +def change_agent_manager_ip(host_manager: HostManager, agent: str, new_manager_ip: str) -> None: + """Change the manager IP of an agent. + + Args: + host_manager: An instance of the HostManager class containing information about hosts. + agent: The name of the agent to be configured. + new_manager_ip: The new manager IP for the agent. 
+ """ + + server_block = {'server': {'elements': [{'address': {'value': new_manager_ip}}]}} + configuration = {'sections': [{'section': 'client', 'elements': [server_block]}]} + + new_configuration = {f"{agent}": [configuration]} + + configure_host(agent, new_configuration, host_manager) + + +def load_vulnerability_detector_configurations(host_manager, configurations_paths, enable=True, + syscollector_interval='1m'): + """Returns the configurations for Vulnerability testing for the agent and manager roles + + Args: + host_manager (HostManager): An instance of the HostManager class containing information about hosts. + configurations_paths (Dict): The paths to the configuration templates for the agent and manager roles. + enable (bool, optional): Enable or disable the vulnerability detector. Defaults to True. + syscollector_interval (str, optional): The syscollector interval. Defaults to '1m'. + + Return: + Dict: Configurations for each role + """ + configurations = {} + vd_enable_value = 'yes' if enable else 'no' + + for host in host_manager.get_group_hosts('all'): + if host in host_manager.get_group_hosts('agent'): + configurations[host] = load_configuration_template(configurations_paths['agent'], + [{}], [{}]) + + configuration_template_str = str(configurations[host]) + configuration_variables = { + 'SYSCOLLECTOR_INTERVAL': syscollector_interval + } + + for key, value in configuration_variables.items(): + configuration_template_str = configuration_template_str.replace(key, value) + configurations[host] = ast.literal_eval(configuration_template_str) + + elif host in host_manager.get_group_hosts('manager'): + configuration_template = load_configuration_template(configurations_paths['manager'], [{}], [{}]) + + # Replace placeholders by real values + manager_index = host_manager.get_group_hosts('manager').index(host) + 2 + indexer_server = host_manager.get_group_hosts('indexer')[0] + indexer_server_variables = host_manager.get_host_variables(indexer_server) + + 
default_filebeat_key_path = f"/etc/pki/filebeat/node-{manager_index}-key.pem" + + filebeat_key = indexer_server_variables.get('filebeat_key_path', + default_filebeat_key_path) + + default_filebeat_certificate_path = f"/etc/pki/filebeat/node-{manager_index}.pem" + filebeat_certificate = indexer_server_variables.get('filebeat_certificate_path', + default_filebeat_certificate_path) + + default_filebeat_root_ca_path = '/etc/pki/filebeat/root-ca.pem' + filebeat_root_ca = indexer_server_variables.get('filebeat_root_ca_path', + default_filebeat_root_ca_path) + indexer_server = indexer_server_variables.get('indexer_server', + indexer_server_variables['ip']) + + configuration_variables = { + 'VULNERABILITY_DETECTOR_ENABLE': vd_enable_value, + 'INDEXER_SERVER': indexer_server, + 'FILEBEAT_ROOT_CA': filebeat_root_ca, + 'FILEBEAT_CERTIFICATE': filebeat_certificate, + 'FILEBEAT_KEY': filebeat_key, + } + configuration_template_str = str(configuration_template) + + for key, value in configuration_variables.items(): + configuration_template_str = configuration_template_str.replace(key, value) + + configurations[host] = ast.literal_eval(configuration_template_str) + + return configurations diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/indexer_api.py b/deps/wazuh_testing/wazuh_testing/end_to_end/indexer_api.py index 1661234336..5bdf3c153a 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/indexer_api.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/indexer_api.py @@ -18,11 +18,11 @@ from wazuh_testing.tools.system import HostManager -STATE_INDEX_NAME = 'wazuh-states-vulnerabilities' +WAZUH_STATES_VULNERABILITIES_INDEXNAME = 'wazuh-states-vulnerabilities' -def create_vulnerability_states_indexer_filter(target_agent: str | None = None, - greater_than_timestamp: str | None = None) -> dict: +def create_vulnerability_states_indexer_filter(target_agent: str = None, + greater_than_timestamp: str = None) -> dict: """Create a filter for the Indexer API for the 
vulnerability state index. Args: @@ -42,7 +42,7 @@ def create_vulnerability_states_indexer_filter(target_agent: str | None = None, return _create_filter(target_agent, timestamp_filter) -def create_alerts_filter(target_agent: str | None = None, greater_than_timestamp: str | None = None) -> dict: +def create_alerts_filter(target_agent: str = None, greater_than_timestamp: str = None) -> dict: """Create a filter for the Indexer API for the alerts index. Args: @@ -62,7 +62,7 @@ def create_alerts_filter(target_agent: str | None = None, greater_than_timestamp return _create_filter(target_agent, timestamp_filter) -def _create_filter(target_agent: str | None = None, timestamp_filter: dict | None = None) -> dict: +def _create_filter(target_agent: str = None, timestamp_filter: dict = None) -> dict: """Create a filter for the Indexer API. Args: @@ -89,7 +89,7 @@ def _create_filter(target_agent: str | None = None, timestamp_filter: dict | Non def get_indexer_values(host_manager: HostManager, credentials: dict = {'user': 'admin', 'password': 'changeme'}, - index: str = 'wazuh-alerts*', filter: dict | None = None, size: int = 10000) -> Dict: + index: str = 'wazuh-alerts*', filter: dict = None, size: int = 10000) -> Dict: """ Get values from the Wazuh Indexer API. diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/logs.py b/deps/wazuh_testing/wazuh_testing/end_to_end/logs.py index 7fa59317c3..3369e3313e 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/logs.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/logs.py @@ -14,7 +14,9 @@ Created by Wazuh, Inc. . 
This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 """ -from typing import Dict +import re +from datetime import datetime +from typing import Dict, List from wazuh_testing import ALERTS_JSON_PATH from wazuh_testing.end_to_end import logs_filepath_os @@ -63,6 +65,51 @@ def get_hosts_logs(host_manager: HostManager, host_group: str = 'all') -> Dict[s return host_logs + +def check_errors_in_environment(host_manager: HostManager, greater_than_timestamp: str = '', + expected_errors: List[str] = None) -> dict: + """Check if there are errors in the environment + + Args: + host_manager (HostManager): An instance of the HostManager class. + greater_than_timestamp (str): Timestamp to filter the logs + expected_errors (List): List of expected errors. Default None + + Returns: + dict: Errors found in the environment + """ + + error_level_to_search = ['ERROR', 'CRITICAL', 'WARNING'] + expected_errors = expected_errors or [] + + environment_logs = get_hosts_logs(host_manager) + environment_level_logs = {} + + for host, environment_log in environment_logs.items(): + environment_level_logs[host] = {} + for level in error_level_to_search: + environment_level_logs[host][level] = [] + regex = re.compile(fr'((\d{{4}}\/\d{{2}}\/\d{{2}} \d{{2}}:\d{{2}}:\d{{2}}) (.+): ({level}):(.*))') + + matches = regex.findall(environment_log) + + for match in matches: + if not any(re.search(error, match[0]) for error in expected_errors): + if greater_than_timestamp: + date_format = "%Y/%m/%d %H:%M:%S" + default_tiemstamp_format = "%Y-%m-%dT%H:%M:%S" + + date_filter_format = datetime.strptime(greater_than_timestamp, default_tiemstamp_format) + log_date = datetime.strptime(match[1], date_format) + + if log_date > date_filter_format: + environment_level_logs[host][level].append(match[0]) + else: + environment_level_logs[host][level].append(match[0]) + + return environment_level_logs + + def get_hosts_alerts(host_manager: HostManager) -> Dict[str, str]: """ 
Get the alerts in the alert.json file from the specified host group. diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/monitoring.py b/deps/wazuh_testing/wazuh_testing/end_to_end/monitoring.py index 560eb0abf6..f4712baf0e 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/monitoring.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/monitoring.py @@ -15,17 +15,17 @@ This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 """ -import re import logging -from time import sleep +import re +from concurrent.futures import ThreadPoolExecutor, as_completed from datetime import datetime +from time import sleep from typing import Dict, List -from concurrent.futures import ThreadPoolExecutor, as_completed from wazuh_testing.end_to_end import logs_filepath_os +from wazuh_testing.end_to_end.regex import get_event_regex from wazuh_testing.tools.system import HostManager - DEFAULT_SCAN_INTERVAL = 5 @@ -98,7 +98,7 @@ def filter_events_by_timestamp(match_events: List) -> List: timestamp_str = match timestamp_format = "%Y/%m/%d %H:%M:%S" - timestamp_format_parameter = "%Y-%m-%dT%H:%M:%S.%f" + timestamp_format_parameter = "%Y-%m-%dT%H:%M:%S" try: timestamp_datetime = datetime.strptime(timestamp_str, timestamp_format) @@ -161,7 +161,7 @@ def filter_events_by_timestamp(match_events: List) -> List: with ThreadPoolExecutor() as executor: futures = [] for host, data in monitoring_data.items(): - futures.append(executor.submit(monitoring_event, host_manager, host, data, ignore_timeout_error, + futures.append(executor.submit(monitoring_event, host_manager, host, data, ignore_timeout_error, scan_interval)) results = {} @@ -219,3 +219,35 @@ def generate_monitoring_logs(host_manager: HostManager, regex_list: List[str], t }) return monitoring_data + + +def monitoring_syscollector_scan_agents(host_manager: HostManager, timeout: int, + greater_than_timestamp: str = '') -> list: + """Monitor syscollector scan on agents. 
+ + Args: + host_manager (HostManager): An instance of the HostManager class. + timeout (int): The timeout value for monitoring. + greater_than_timestamp_formatted (str): Timestamp to filter agents logs. Default '' + + Returns: + list: A list of agents that were not scanned. + """ + agents_not_scanned = [] + + logging.info("Monitoring syscollector first scan") + list_hosts = host_manager.get_group_hosts('agent') + monitoring_data = generate_monitoring_logs(host_manager, + [get_event_regex({'event': 'syscollector_scan_start'}), + get_event_regex({'event': 'syscollector_scan_end'})], + [timeout, timeout], + list_hosts, greater_than_timestamp=greater_than_timestamp) + monitoring_results = monitoring_events_multihost(host_manager, monitoring_data) + + logging.info(f"Value of monitoring results is: {monitoring_results}") + + for agent in monitoring_results: + if monitoring_results[agent]['not_found']: + agents_not_scanned.append(agent) + + return agents_not_scanned diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/remote_operations_handler.py b/deps/wazuh_testing/wazuh_testing/end_to_end/remote_operations_handler.py index 3477aa75c7..8631cfc354 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/remote_operations_handler.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/remote_operations_handler.py @@ -12,7 +12,6 @@ - check_vuln_state_index: Check the vulnerability state index for a host. - check_vuln_alert_indexer: Check vulnerability alerts in the indexer for a host. - check_vuln_alert_api: Check vulnerability alerts via API for a host. - - launch_remote_sequential_operation_on_agent: Launch sequential remote operations on a specific agent. - launch_parallel_operations: Launch parallel remote operations on multiple hosts. @@ -20,201 +19,285 @@ Created by Wazuh, Inc. . 
This program is a free software; you can redistribute it and/or modify it under the terms of GPLv2 """ + import logging -from typing import Dict, List -from datetime import datetime, timezone +import threading from concurrent.futures import ThreadPoolExecutor -from wazuh_testing.end_to_end.waiters import wait_syscollector_and_vuln_scan +from typing import Any, Dict, List +from wazuh_testing.end_to_end.vulnerability_detector import (Vulnerability, + get_vulnerabilities_from_alerts_by_agent, + get_vulnerabilities_from_states_by_agent, + load_packages_metadata) from wazuh_testing.tools.system import HostManager -from wazuh_testing.end_to_end.vulnerability_detector import check_vuln_alert_indexer, check_vuln_state_index, \ - load_packages_metadata, parse_vulnerability_detector_alerts -from wazuh_testing.end_to_end.indexer_api import get_indexer_values, \ - create_vulnerability_states_indexer_filter, create_alerts_filter - - -def check_vulnerability_alerts(results: Dict, check_data: Dict, current_datetime: str, host_manager: HostManager, - host: str, - package_data: Dict, - operation: str = 'install') -> None: - - # Get all the alerts generated in the timestamp - vulnerability_alerts = {} - vulnerability_alerts_mitigated = {} - vulnerability_index = {} - - for agent in host_manager.get_group_hosts('agent'): - alerts_filter = create_alerts_filter(agent, current_datetime) - index_vuln_filter = create_vulnerability_states_indexer_filter(agent, current_datetime) - - agent_all_alerts = parse_vulnerability_detector_alerts(get_indexer_values(host_manager, - filter=alerts_filter)['hits']['hits']) - agent_all_vulnerabilities = get_indexer_values(host_manager, filter=index_vuln_filter, - index='wazuh-states-vulnerabilities')['hits']['hits'] - - vulnerability_alerts[agent] = agent_all_alerts['affected'] - vulnerability_alerts_mitigated[agent] = agent_all_alerts['mitigated'] - - vulnerability_index[agent] = agent_all_vulnerabilities - - results['evidences']['all_alerts_found'] = 
vulnerability_alerts - results['evidences']['all_alerts_found_mitigated'] = vulnerability_alerts_mitigated - results['evidences']['all_states_found'] = vulnerability_index - - # Check unexpected alerts. For installation/removal non vulnerable package - if 'no_alerts' in check_data and check_data['no_alerts']: - logging.info(f'Checking unexpected vulnerability alerts in the indexer for {host}') - results['evidences']["alerts_found_unexpected"] = { - "mitigated": vulnerability_alerts_mitigated, - "vulnerabilities": vulnerability_alerts - } - if len(results['evidences']['alerts_found_unexpected'].get('mitigated', [])) > 0 or \ - len(results['evidences']['alerts_found_unexpected'].get('vulnerabilities', [])) > 0: - results['checks']['all_successfull'] = False - - # Check expected alerts - elif check_data['alerts']: - logging.info(f'Checking vulnerability alerts for {host}') - if operation == 'update' or operation == 'remove': - evidence_key = "alerts_not_found_from" if operation == 'update' else "alerts_not_found" - package_data_to_use = package_data['from'] if operation == 'update' else package_data - # Check alerts from previous package are mitigated - results['evidences'][evidence_key] = check_vuln_alert_indexer(vulnerability_alerts_mitigated, - host, - package_data_to_use, - current_datetime) - elif operation == 'install' or operation == 'update': - # Check alerts from new package are found - evidence_key = "alerts_not_found_to" if operation == 'update' else "alerts_not_found" - package_data_to_use = package_data['to'] if operation == 'update' else package_data - results['evidences'][evidence_key] = check_vuln_alert_indexer(vulnerability_alerts, - host, - package_data_to_use, - current_datetime) - - if len(results['evidences'].get('alerts_not_found_from', [])) > 0 or \ - len(results['evidences'].get('alerts_not_found_to', [])) > 0 or \ - len(results['evidences'].get('alerts_not_found', [])) > 0: - results['checks']['all_successfull'] = False - - # Check unexpected 
states - if 'no_indices' in check_data and check_data['no_indices']: - logging.info(f'Checking vulnerability state index for {host}') - results['evidences']["states_found_unexpected"] = vulnerability_index - - if len(results['evidences']['states_found_unexpected']) > 0: - results['checks']['all_successfull'] = False - - elif check_data['state_index']: - if operation == 'update' or operation == 'remove': - evidence_key = 'states_found_unexpected_from' if operation == 'update' else 'states_found_unexpected' - package_data_to_use = package_data['from'] if operation == 'update' else package_data - # Check states from previous package are mitigated - results['evidences'][evidence_key] = check_vuln_state_index(host_manager, host, package_data_to_use, - current_datetime) - if len(results['evidences'][evidence_key]) != len(package_data_to_use['CVE']): - results['checks']['all_successfull'] = False - - elif operation == 'install' or operation == 'update': - # Check states from new package are found - evidence_key = 'states_not_found_to' if operation == 'update' else 'states_not_found' - package_data_to_use = package_data['to'] if operation == 'update' else package_data - results['evidences'][evidence_key] = check_vuln_state_index(host_manager, host, package_data_to_use, - current_datetime) - - if len(results['evidences'][evidence_key]) != 0: - results['checks']['all_successfull'] = False - - -def install_package(host: str, operation_data: Dict[str, Dict], host_manager: HostManager): - """ - Install a package on the specified host. - Args: - host (str): The target host on which to perform the operation. - operation_data (dict): Dictionary containing operation details. - host_manager (HostManager): An instance of the HostManager class containing information about hosts. - Raises: - ValueError: If the specified operation is not recognized. 
def get_vulnerabilities_not_found(vulnerabilities_found: List, expected_vulnerabilities: List) -> List:
    """Return the expected vulnerabilities missing from the found list.

    Args:
        vulnerabilities_found (list): Vulnerabilities actually found.
        expected_vulnerabilities (list): Vulnerabilities that were expected.

    Returns:
        list: Expected vulnerabilities not present in ``vulnerabilities_found``,
        in the order they appear in ``expected_vulnerabilities``.
    """
    return [vulnerability for vulnerability in expected_vulnerabilities
            if vulnerability not in vulnerabilities_found]


def get_expected_vulnerabilities_for_package(
    host_manager: 'HostManager', host: str, package_id: str
) -> list:
    """Build the list of vulnerabilities expected for a package on a host.

    Args:
        host_manager (HostManager): Inventory/connection manager for the test hosts.
        host (str): Target host name.
        package_id (str): Package identifier in the packages metadata catalog.

    Returns:
        list: ``Vulnerability`` tuples sorted by (cve, package_name,
        package_version, architecture).
    """
    package_data = load_packages_metadata()[package_id]
    host_os_arch = host_manager.get_host_variables(host)['architecture']

    # npm packages are reported without an architecture; otherwise normalize the
    # inventory architecture names to the ones reported by the scanner.
    if package_data.get('use_npm', False):
        architecture = ''
    else:
        architecture = {'amd64': 'x86_64', 'arm64v8': 'arm64'}.get(host_os_arch, host_os_arch)

    vulnerabilities_list = [
        Vulnerability(cve, package_data['package_name'], package_data['package_version'], architecture)
        for cve in package_data['CVE']
    ]

    return sorted(vulnerabilities_list,
                  key=lambda v: (v.cve, v.package_name, v.package_version, v.architecture))
packages_data: List) -> Dict: + filtered_vulnerabilities = {} + for host in vulnerabilities.keys(): + filtered_vulnerabilities[host] = [] + host_os_name = host_manager.get_host_variables(host)["os"].split("_")[0] + host_os_arch = host_manager.get_host_variables(host)["architecture"] + + for package_data in packages_data: + package_id = package_data[host_os_name][host_os_arch] + data = load_packages_metadata()[package_id] + package_name = data["package_name"] + + for vulnerability in vulnerabilities[host]: + if vulnerability.package_name == package_name: + filtered_vulnerabilities[host].append(vulnerability) + + return filtered_vulnerabilities + + +def get_expected_vulnerabilities_by_agent( + host_manager: HostManager, agents_list: List, packages_data: Dict +) -> Dict: + """ + Get the expected vulnerabilities by agent. - check_vulnerability_alerts(results, operation_data['check'], current_datetime, host_manager, host, - package_data, operation='install') + Args: + host_manager (HostManager): An instance of the HostManager class containing information about hosts. + packages_data (dict): Dictionary containing package data. - else: - logging.error(f"Error: Package for {host_os_name} and {host_os_arch} not found") + Returns: + dict: Dictionary containing the expected vulnerabilities by agent. 
+ """ - except Exception as e: - logging.critical(f"Error searching package: {e}") + expected_vulnerabilities_by_agent = {} + for agent in agents_list: + host_os_name = host_manager.get_host_variables(agent)["os"].split("_")[0] + host_os_arch = host_manager.get_host_variables(agent)["architecture"] - else: - logging.info(f"No operation to perform on {host}") + expected_vulnerabilities_by_agent[agent] = [] + package_id = packages_data[host_os_name][host_os_arch] + expected_vulnerabilities = get_expected_vulnerabilities_for_package( + host_manager, agent, package_id + ) + expected_vulnerabilities_by_agent[agent] = expected_vulnerabilities + + return expected_vulnerabilities_by_agent + + +def get_package_url_for_host( + host: str, package_data: Dict[str, Any], host_manager: HostManager +) -> str: + + host_os_name = host_manager.get_host_variables(host)["os"].split("_")[0] + host_os_arch = host_manager.get_host_variables(host)["architecture"] + system = host_manager.get_host_variables(host)["os_name"] + + if system == "linux": + system = host_manager.get_host_variables(host)["os"].split("_")[0] + + try: + package_id = package_data[host_os_name][host_os_arch] + package_data = load_packages_metadata()[package_id] + package_url = package_data["urls"][host_os_name][host_os_arch] + + return package_url + except KeyError: + raise ValueError( + f"Package for {host_os_name} and {host_os_arch} not found. Maybe {host} OS is not supported." 
+ ) + + +def get_package_npm( + host: str, package_data: Dict[str, Any], host_manager: HostManager +) -> bool: + host_os_name = host_manager.get_host_variables(host)["os"].split("_")[0] + host_os_arch = host_manager.get_host_variables(host)["architecture"] + system = host_manager.get_host_variables(host)["os_name"] + + if system == "linux": + system = host_manager.get_host_variables(host)["os"].split("_")[0] + + install_package_data = package_data + + package_id = install_package_data[host_os_name][host_os_arch] + package_data = load_packages_metadata()[package_id] + package_npm = package_data.get("use_npm", False) + + return package_npm + + +def get_package_uninstallation_name( + host: str, + package_id: str, + host_manager: HostManager, + operation_data: Dict[str, Any], +) -> str: + host_os_name = host_manager.get_host_variables(host)["os"].split("_")[0] + host_os_arch = host_manager.get_host_variables(host)["architecture"] + system = host_manager.get_host_variables(host)["os_name"] + + if system == "linux": + system = host_manager.get_host_variables(host)["os"].split("_")[0] + + install_package_data = operation_data["package"] + try: + package_id = install_package_data[host_os_name][host_os_arch] + package_data = load_packages_metadata()[package_id] + + if system == 'windows': + package_uninstall_name = package_data['product_id'] + else: + package_uninstall_name = package_data["uninstall_name"] + + return package_uninstall_name + except KeyError: + raise ValueError( + f"Package for {host_os_name} and {host_os_arch} not found uninstall name." 
+ ) + + +def get_package_system(host: str, host_manager: HostManager) -> str: + system = host_manager.get_host_variables(host)["os_name"] + if system == "linux": + system = host_manager.get_host_variables(host)["os"].split("_")[0] + + return system + + +def get_vulnerability_alerts( + host_manager: HostManager, + agent_list, + packages_data: List, + greater_than_timestamp: str = "", +) -> Dict: + alerts = get_vulnerabilities_from_alerts_by_agent( + host_manager, agent_list, greater_than_timestamp=greater_than_timestamp + ) + alerts_vulnerabilities = filter_vulnerabilities_by_packages( + host_manager, alerts["affected"], packages_data + ) + alerts_vulnerabilities_mitigated = filter_vulnerabilities_by_packages( + host_manager, alerts["mitigated"], packages_data + ) return { - f"{host}": results + "affected": alerts_vulnerabilities, + "mitigated": alerts_vulnerabilities_mitigated, } -def remove_package(host: str, operation_data: Dict[str, Dict], host_manager: HostManager): +def get_vulnerabilities_index(host_manager: HostManager, agent_list, packages_data: List[Dict], + greater_than_timestamp: str = "") -> Dict: + vulnerabilities = get_vulnerabilities_from_states_by_agent(host_manager, agent_list, + greater_than_timestamp=greater_than_timestamp) + package_vulnerabilities = filter_vulnerabilities_by_packages(host_manager, vulnerabilities, packages_data) + + return package_vulnerabilities + + +def get_expected_alerts( + host_manager: HostManager, agent_list, operation: str, packages_data: Dict +) -> Dict: + expected_alerts_vulnerabilities = {"affected": {}, "mitigated": {}} + + if operation == "update_package": + expected_alerts_vulnerabilities["mitigated"] = ( + get_expected_vulnerabilities_by_agent( + host_manager, agent_list, packages_data["from"] + ) + ) + expected_alerts_vulnerabilities["affected"] = ( + get_expected_vulnerabilities_by_agent( + host_manager, agent_list, packages_data["to"] + ) + ) + elif operation == "remove_package": + 
expected_alerts_vulnerabilities["mitigated"] = ( + get_expected_vulnerabilities_by_agent( + host_manager, agent_list, packages_data + ) + ) + elif operation == "install_package": + expected_alerts_vulnerabilities["affected"] = ( + get_expected_vulnerabilities_by_agent( + host_manager, agent_list, packages_data + ) + ) + + return expected_alerts_vulnerabilities + + +def get_expected_index(host_manager: HostManager, agent_list, operation: str, packages_data: Dict) -> Dict: + expected_index = {} + if operation == "update_package": + expected_index = get_expected_vulnerabilities_by_agent(host_manager, agent_list, packages_data["to"]) + elif operation == "install_package": + expected_index = get_expected_vulnerabilities_by_agent(host_manager, agent_list, packages_data) + + return expected_index + + +def install_package( + host: str, operation_data: Dict[str, Any], host_manager: HostManager +) -> bool: """ Install a package on the specified host. @@ -226,76 +309,31 @@ def remove_package(host: str, operation_data: Dict[str, Dict], host_manager: Hos Raises: ValueError: If the specified operation is not recognized. 
""" - logging.info(f"Removing package on {host}") - results = { - 'evidences': { - "alerts_not_found": [], - "states_not_found": [], - "alerts_found": [], - "states_found": [], - "alerts_found_unexpected": [], - "states_found_unexpected": [] - }, - 'checks': { - 'all_successfull': True - } - } - host_os_name = host_manager.get_host_variables(host)['os'].split('_')[0] - host_os_arch = host_manager.get_host_variables(host)['architecture'] - system = host_manager.get_host_variables(host)['os_name'] - if system == 'linux': - system = host_manager.get_host_variables(host)['os'].split('_')[0] - - package_data = operation_data['package'] - package_id = None - - if host_os_name in package_data: - try: - if host_os_arch in package_data[host_os_name]: - package_id = package_data[host_os_name][host_os_arch] + package = operation_data['package'] + if 'to' in operation_data['package'].keys(): + package = package['to'] - package_data = load_packages_metadata()[package_id] - use_npm = package_data.get('use_npm', False) - - current_datetime = datetime.now(timezone.utc).isoformat()[:-6] # Delete timezone offset - - logging.info(f"Removing package on {host}") - if 'uninstall_name' in package_data: - uninstall_name = package_data['uninstall_name'] - if use_npm: - host_manager.remove_npm_package(host, system, package_uninstall_name=uninstall_name) - else: - host_manager.remove_package(host, system, package_uninstall_name=uninstall_name) - elif 'uninstall_custom_playbook' in package_data: - host_manager.remove_package(host, system, - custom_uninstall_playbook=package_data['uninstall_custom_playbook']) - - wait_is_required = 'check' in operation_data and (operation_data['check']['alerts'] or - operation_data['check']['state_index'] or - operation_data['check']['no_alerts'] or - operation_data['check']['no_indices']) - - if wait_is_required: - wait_syscollector_and_vuln_scan(host_manager, host, operation_data, current_datetime) - - check_vulnerability_alerts(results, 
operation_data['check'], current_datetime, host_manager, host, - package_data, operation='remove') - - else: - logging.error(f"Error: Package for {host_os_name} and {host_os_arch} not found") + result = True + logging.info(f"Installing package on {host}") + package_url = get_package_url_for_host( + host, package, host_manager + ) + package_system = get_package_system(host, host_manager) + npm_package = get_package_npm(host, package, host_manager) - except Exception as e: - logging.critical(f"Error searching package: {e}") + try: + if npm_package: + host_manager.install_npm_package(host, package_url, package_system) + else: + host_manager.install_package(host, package_url, package_system) + except Exception as e: + logging.error(f"Error installing package on {host}: {e}") + result = False - else: - logging.info(f"No operation to perform on {host}") - - return { - f"{host}": results - } + return result -def update_package(host: str, operation_data: Dict[str, Dict], host_manager: HostManager): +def remove_package(host: str, operation_data: Dict[str, Any], host_manager: HostManager) -> bool: """ Install a package on the specified host. @@ -307,113 +345,73 @@ def update_package(host: str, operation_data: Dict[str, Dict], host_manager: Hos Raises: ValueError: If the specified operation is not recognized. 
""" - logging.info(f"Updating package on {host}") - results = { - 'evidences': { - "alerts_not_found_from": [], - 'alerts_found_from': [], - "alerts_found": [], - "states_found": [], - "alerts_found_unexpected": [], - "states_found_unexpected": [] - }, - 'checks': { - 'all_successfull': True - } - - } - - host_os_name = host_manager.get_host_variables(host)['os'].split('_')[0] - host_os_arch = host_manager.get_host_variables(host)['architecture'] - system = host_manager.get_host_variables(host)['os_name'] - if system == 'linux': - system = host_manager.get_host_variables(host)['os'].split('_')[0] - - install_package_data_from = operation_data['package']['from'] - install_package_data_to = operation_data['package']['to'] + result = True + logging.info(f"Removing package on {host}") + package_system = get_package_system(host, host_manager) - package_id_from = None - package_id_to = None + try: + package_uninstall_name = None + custom_uninstall_playbook = None + package_data = operation_data['package'] - if host_os_name in install_package_data_from: - try: - if host_os_arch in install_package_data_from[host_os_name]: - package_id_from = install_package_data_from[host_os_name][host_os_arch] - else: - logging.error(f"Error: Package for {host_os_name} and {host_os_arch} not found") - except Exception as e: - logging.critical(f"Error searching package: {e}") - - if host_os_name in install_package_data_to: + npm_package = get_package_npm(host, package_data, host_manager) try: - if host_os_arch in install_package_data_to[host_os_name]: - package_id_to = install_package_data_to[host_os_name][host_os_arch] - - package_data_from = load_packages_metadata()[package_id_from] - package_data_to = load_packages_metadata()[package_id_to] - - package_url_to = package_data_to['urls'][host_os_name][host_os_arch] - - logging.info(f"Installing package on {host}") - logging.info(f"Package URL: {package_url_to}") - - current_datetime = datetime.now(timezone.utc).isoformat()[:-6] # Delete 
timezone offset - use_npm = package_data_to.get('use_npm', False) - - if use_npm: - host_manager.install_npm_package(host, package_url_to, system) - else: - host_manager.install_package(host, package_url_to, system) - - logging.info(f"Package {package_url_to} installed on {host}") - - logging.info(f"Package installed on {host}") - - wait_is_required = 'check' in operation_data and (operation_data['check']['alerts'] or - operation_data['check']['state_index'] or - operation_data['check']['no_alerts'] or - operation_data['check']['no_indices']) - if wait_is_required: - wait_syscollector_and_vuln_scan(host_manager, host, operation_data, current_datetime) - - check_vulnerability_alerts(results, operation_data['check'], current_datetime, host_manager, host, - {'from': package_data_from, 'to': package_data_to}, operation='update') - - else: - logging.error(f"Error: Package for {host_os_name} and {host_os_arch} not found") - - except Exception as e: - logging.critical(f"Error searching package: {e}") - - else: - logging.info(f"No operation to perform on {host}") - - return { - f"{host}": results - } - - -def launch_remote_sequential_operation_on_agent(agent: str, task_list: List[Dict], host_manager: HostManager): - """ - Launch sequential remote operations on an agent. - - Args: - agent (str): The target agent on which to perform the operations. - task_list (list): List of dictionaries containing operation details. - host_manager (HostManager): An instance of the HostManager class containing information about hosts. 
- """ - # Convert datetime to Unix timestamp (integer) - timestamp = datetime.now(timezone.utc).isoformat()[:-6] # Delete timezone offset - - if task_list: - for task in task_list: - operation = task['operation'] - if operation in locals(): - locals()[operation](agent, task, host_manager, timestamp) - - -def launch_remote_operation(host: str, operation_data: Dict[str, Dict], host_manager: HostManager): - operation = operation_data['operation'] + package_uninstall_name = get_package_uninstallation_name( + host, package_data, host_manager, operation_data + ) + except ValueError: + logging.info( + f"No uninstall name found for {operation_data['package']}. Searching for custom playbook" + ) + custom_uninstall_playbook = ( + package_data["uninstall_playbook"] + if "uninstall_playbook" in package_data + else None + ) + + if npm_package: + host_manager.remove_npm_package( + host, package_system, package_uninstall_name, custom_uninstall_playbook + ) + else: + host_manager.remove_package( + host, package_system, package_uninstall_name, custom_uninstall_playbook + ) + + except Exception as e: + logging.error(f"Error removing package on {host}: {e}") + result = False + + return result + + +def update_package( + host: str, operation_data: Dict[str, Any], host_manager: HostManager +) -> bool: + result = True + logging.info(f"Installing package on {host}") + package_url = get_package_url_for_host( + host, operation_data["package"]["to"], host_manager + ) + package_system = get_package_system(host, host_manager) + npm_package = get_package_npm(host, operation_data['package']['to'], host_manager) + + try: + if npm_package: + host_manager.install_npm_package(host, package_url, package_system) + else: + host_manager.install_package(host, package_url, package_system) + except Exception as e: + logging.error(f"Error installing package on {host}: {e}") + result = False + + return result + + +def launch_remote_operation( + host: str, operation_data: Dict[str, Dict], host_manager: 
HostManager +): + operation = operation_data["operation"] if operation in globals(): operation_result = globals()[operation](host, operation_data, host_manager) logging.info(f"Operation result: {operation_result}") @@ -422,37 +420,60 @@ def launch_remote_operation(host: str, operation_data: Dict[str, Dict], host_man raise ValueError(f"Operation {operation} not recognized") -def launch_parallel_operations(task_list: List[Dict], host_manager: HostManager, target_to_ignore: List[str] = []): +def filter_hosts_by_os(host_manager: HostManager, os_list: List[str]) -> List[str]: + agents = host_manager.get_group_hosts('agent') + agents_target_os = [] + for agent in agents: + system = host_manager.get_host_variables(agent)["os_name"] + + if system == "linux": + system = host_manager.get_host_variables(agent)["os"].split("_")[0] + + if system in os_list: + agents_target_os.append(agent) + + return agents_target_os + + +def launch_parallel_operations(task: Dict[str, List], host_manager: HostManager, + target_to_ignore: List[str] = None): """ Launch parallel remote operations on multiple hosts. Args: - task_list (list): List of dictionaries containing operation details. + operation (list): List of dictionaries containing operation details. host_manager (HostManager): An instance of the HostManager class containing information about hosts. 
""" - results = {} - if target_to_ignore: - for target in results: - results[target]['checks']['all_successfull'] = False + hosts_to_ignore = target_to_ignore if target_to_ignore else [] + target = "agent" + results = {} + lock = threading.Lock() def launch_and_store_result(args): host, task, manager = args result = launch_remote_operation(host, task, manager) - results.update(result) + with lock: + results[host] = result with ThreadPoolExecutor() as executor: # Submit tasks asynchronously + hosts_target = host_manager.get_group_hosts(target) + futures = [] - for task in task_list: - hosts_target = host_manager.get_group_hosts(task['target']) - if target_to_ignore: - hosts_target = [host for host in hosts_target if host not in target_to_ignore] - logging.info("Hosts target after removing ignored targets: {}".format(hosts_target)) + # Calculate the hosts to ignore based on previous operations results + if hosts_to_ignore: + hosts_target = [ + host for host in hosts_target if host not in hosts_to_ignore + ] + + logging.info(f"Launching operation {task['operation']} on {hosts_target}") - for host in hosts_target: - futures.append(executor.submit(launch_and_store_result, (host, task, host_manager))) + for host in hosts_target: + futures.append( + executor.submit(launch_and_store_result, (host, task, host_manager)) + ) # Wait for all tasks to complete for future in futures: diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/utils.py b/deps/wazuh_testing/wazuh_testing/end_to_end/utils.py new file mode 100644 index 0000000000..b084593317 --- /dev/null +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/utils.py @@ -0,0 +1,18 @@ +import yaml + + +def load_test_cases(file_path): + with open(file_path, "r") as cases_file: + cases = yaml.load(cases_file, Loader=yaml.FullLoader) + return cases if cases else [] + + +def extract_case_info(cases): + return [ + (case.get("preconditions"), case.get("body"), case.get("teardown")) + for case in cases + ] + + +def 
get_case_ids(cases): + return [case["id"] for case in cases] diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector.py b/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector.py index 98f36a79d5..2850b1e23f 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector.py @@ -23,12 +23,16 @@ from typing import Dict, List from wazuh_testing.tools.system import HostManager -from wazuh_testing.end_to_end.indexer_api import get_indexer_values, create_vulnerability_states_indexer_filter +from wazuh_testing.end_to_end.indexer_api import get_indexer_values, create_vulnerability_states_indexer_filter, \ + create_alerts_filter, WAZUH_STATES_VULNERABILITIES_INDEXNAME from wazuh_testing.end_to_end.regex import REGEX_PATTERNS from collections import namedtuple -Vulnerability = namedtuple('Vulnerability', ['cve', 'package_name', 'package_version', 'type', 'architecture']) +PACKAGE_VULNERABILITY_SCAN_TIME = 60 +TIMEOUT_PER_AGENT_VULNERABILITY_FIRST_SCAN = PACKAGE_VULNERABILITY_SCAN_TIME * 3 + +Vulnerability = namedtuple('Vulnerability', ['cve', 'package_name', 'package_version', 'architecture']) def load_packages_metadata() -> Dict: @@ -78,10 +82,12 @@ def check_vuln_state_index(host_manager: HostManager, host: str, package: Dict[s package (dict): Dictionary containing package data. current_datetime (str): Datetime to filter the vulnerability state index. 
""" - filter = create_vulnerability_states_indexer_filter(host, current_datetime) - index_vuln_state_content = get_indexer_values(host_manager, - index='wazuh-states-vulnerabilities', - filter=filter)['hits']['hits'] + indexer_user, indexer_password = host_manager.get_indexer_credentials() + filter = create_vulnerability_states_indexer_filter(target_agent=host, greater_than_timestamp=current_datetime) + index_vuln_state_content = get_indexer_values(host_manager, index=WAZUH_STATES_VULNERABILITIES_INDEXNAME, + filter=filter, + credentials={'user': indexer_user, + 'password': indexer_password})['hits']['hits'] expected_alerts_not_found = [] logging.info(f"Checking vulnerability state index {package}") @@ -96,11 +102,11 @@ def check_vuln_state_index(host_manager: HostManager, host: str, package: Dict[s 'package_version': package['package_version'] } - for indice_vuln in index_vuln_state_content: - state_agent = indice_vuln['_source']['agent']['name'] - state_cve = indice_vuln["_source"]['vulnerability']['id'] - state_package_name = indice_vuln['_source']['package']['name'] - state_package_version = indice_vuln['_source']['package']['version'] + for index_vuln in index_vuln_state_content: + state_agent = index_vuln['_source']['agent']['name'] + state_cve = index_vuln["_source"]['vulnerability']['id'] + state_package_name = index_vuln['_source']['package']['name'] + state_package_version = index_vuln['_source']['package']['version'] if state_agent == host and state_cve == vulnerability \ and state_package_name == package['package_name'] and \ @@ -114,7 +120,7 @@ def check_vuln_state_index(host_manager: HostManager, host: str, package: Dict[s return expected_alerts_not_found -def parse_vulnerability_detector_alerts(alerts) -> Dict: +def get_vulnerability_detector_alerts(alerts: List) -> Dict: """ Parse vulnerability detector alerts. 
@@ -141,6 +147,44 @@ def parse_vulnerability_detector_alerts(alerts) -> Dict: return vulnerability_detector_alerts +def parse_vulnerabilities_from_alerts(vulnerabilities_alerts: List) -> List: + """Parse vulnerabilities from the vulnerability detector alerts. + + Args: + vulnerabilities_alerts (list): List of vulnerabilities from the vulnerability detector alerts. + + Returns: + list: List of vulnerabilities sorted by cve, package_name, package_version, and architecture. + """ + vulnerabilities = [] + + for alert in vulnerabilities_alerts: + try: + architecture = alert['_source']['data']['vulnerability']['package']['architecture'] if \ + 'vulnerability' in alert['_source']['data'] and \ + 'package' in alert['_source']['data']['vulnerability'] and \ + 'architecture' in alert['_source']['data']['vulnerability']['package'] else "" + + if architecture == 'amd64': + architecture = 'x86_64' + elif architecture == 'aarch64': + architecture = 'arm64' + + vulnerability = Vulnerability( + cve=alert['_source']['data']['vulnerability']['cve'], + package_name=alert['_source']['data']['vulnerability']['package']['name'], + package_version=alert['_source']['data']['vulnerability']['package']['version'], + architecture=architecture + ) + vulnerabilities.append(vulnerability) + except KeyError as e: + logging.error(f"Error parsing vulnerability: {alert}: {str(e)}") + + vulnerabilities = sorted(vulnerabilities, key=lambda x: (x.cve, x.package_name, x.package_version, x.architecture)) + + return vulnerabilities + + def get_indexed_vulnerabilities_by_agent(indexed_vulnerabilities) -> Dict: """Get indexed vulnerabilities by agent. 
@@ -304,27 +348,85 @@ def get_vulnerabilities_from_states(vulnerabilities_states: List) -> List: vulnerabilities = [] for state_vulnerability in vulnerabilities_states: + + architecture = state_vulnerability['_source']['package']['architecture'] \ + if 'package' in state_vulnerability['_source'] and \ + 'architecture' in state_vulnerability['_source']['package'] else "" + + if architecture == 'amd64': + architecture = 'x86_64' + elif architecture == 'aarch64': + architecture = 'arm64' + try: vulnerability = Vulnerability( cve=state_vulnerability['_source']['vulnerability']['id'], package_name=(state_vulnerability['_source']['package']['name'] if 'package' in state_vulnerability['_source'] - and 'name' in state_vulnerability['_source']['package'] else None), + and 'name' in state_vulnerability['_source']['package'] else ""), package_version=(state_vulnerability['_source']['package']['version'] if 'package' in state_vulnerability['_source'] - and 'version' in state_vulnerability['_source']['package'] else None), - type=(state_vulnerability['_source']['package']['type'] - if 'package' in state_vulnerability['_source'] - and 'type' in state_vulnerability['_source']['package'] else None), - architecture=(state_vulnerability['_source']['package']['architecture'] - if 'package' in state_vulnerability['_source'] - and 'architecture' in state_vulnerability['_source']['package'] else None) + and 'version' in state_vulnerability['_source']['package'] else ""), + architecture=architecture ) vulnerabilities.append(vulnerability) - except KeyError: + except KeyError as e: logging.error(f"Error parsing vulnerability: {state_vulnerability}") - raise KeyError + raise e vulnerabilities = sorted(vulnerabilities, key=lambda x: (x.cve, x.package_name, x.package_version, x.architecture)) return vulnerabilities + + +def get_vulnerabilities_from_states_by_agent(host_manager: HostManager, agents: List[str], + greater_than_timestamp: str = None) -> dict: + vuln_by_agent_index = {} + 
indexer_user, indexer_password = host_manager.get_indexer_credentials() + + for agent in agents: + agent_all_vulnerabilities = [] + try: + filter = create_vulnerability_states_indexer_filter(target_agent=agent, + greater_than_timestamp=greater_than_timestamp) + agent_all_vulnerabilities = get_indexer_values(host_manager, + filter=filter, + index=WAZUH_STATES_VULNERABILITIES_INDEXNAME, + credentials={'user': indexer_user, + 'password': indexer_password} + )['hits']['hits'] + except KeyError as e: + logging.error(f"No vulnerabilities were obtained for {agent}. Exception {str(e)}") + + vuln_by_agent_index[agent] = get_vulnerabilities_from_states(agent_all_vulnerabilities) + + return vuln_by_agent_index + + +def get_vulnerabilities_from_alerts_by_agent(host_manager: HostManager, agents: List[str], greater_than_timestamp: str): + vuln_by_agent_index = { + 'mitigated': {}, + 'affected': {} + } + indexer_user, indexer_password = host_manager.get_indexer_credentials() + + for agent in agents: + agent_all_vulnerabilities = [] + try: + filter = create_alerts_filter(target_agent=agent, greater_than_timestamp=greater_than_timestamp) + vuln_by_agent_index['mitigated'][agent] = [] + agent_all_alerts = get_indexer_values(host_manager, + filter=filter, + credentials={'user': indexer_user, + 'password': indexer_password})['hits']['hits'] + agent_all_vulnerabilities = get_vulnerability_detector_alerts(agent_all_alerts) + parsed_vulnerabilities_mitigated = parse_vulnerabilities_from_alerts(agent_all_vulnerabilities['mitigated']) + parsed_vulnerabilities_affected = parse_vulnerabilities_from_alerts(agent_all_vulnerabilities['affected']) + + vuln_by_agent_index['mitigated'][agent] = parsed_vulnerabilities_mitigated + vuln_by_agent_index['affected'][agent] = parsed_vulnerabilities_affected + + except KeyError as e: + logging.error(f"No vulnerabilities were obtained for {agent}. 
Exception {str(e)}") + + return vuln_by_agent_index diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector_packages/vuln_packages.json b/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector_packages/vuln_packages.json index c648a1a04c..718729aec4 100644 --- a/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector_packages/vuln_packages.json +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/vulnerability_detector_packages/vuln_packages.json @@ -32,7 +32,8 @@ "CVE-2022-31107", "CVE-2022-31097", "CVE-2022-23552", - "CVE-2022-23498" + "CVE-2022-23498", + "CVE-2023-3128" ], "urls": { "ubuntu": { @@ -62,7 +63,8 @@ "CVE-2022-31107", "CVE-2022-31097", "CVE-2022-23552", - "CVE-2022-23498" + "CVE-2022-23498", + "CVE-2023-3128" ], "urls": { "centos": { @@ -416,7 +418,7 @@ "uninstall_custom_playbook": "remove_vlc_win.yml" }, "node-v17.0.1": { - "package_name": "node", + "package_name": "Node.js", "package_version": "17.0.1", "CVE": [ "CVE-2022-21824", @@ -431,10 +433,11 @@ "amd64": "https://nodejs.org/dist/v17.0.1/node-v17.0.1-x64.msi" } }, - "uninstall_name": "node*" + "uninstall_name": "node*", + "product_id": "{A08E96EF-30FF-41D9-9B06-EF2E86638B28}" }, "node-v17.1.0": { - "package_name": "node", + "package_name": "Node.js", "package_version": "17.1.0", "CVE": [ "CVE-2022-21824", @@ -449,10 +452,12 @@ "amd64": "https://nodejs.org/dist/v17.1.0/node-v17.1.0-x64.msi" } }, - "uninstall_name": "node*" + "uninstall_name": "node*", + "product_id": "{391CF67B-4D6E-46F2-BE9B-BEB71FA31FBF}" + }, "node-v18.0.0": { - "package_name": "node", + "package_name": "Node.js", "package_version": "18.0.0", "CVE": [ "CVE-2023-44487", @@ -486,10 +491,11 @@ "amd64": "https://nodejs.org/dist/v18.0.0/node-v18.0.0-x64.msi" } }, - "uninstall_name": "node*" + "uninstall_name": "node*", + "product_id": "{4A5B4D0F-EC0F-40F9-99E8-0E2048283DC8}" }, "node-v18.1.0": { - "package_name": "node", + "package_name": "Node.js", "package_version": "18.1.0", "CVE": [ 
"CVE-2023-44487", @@ -522,7 +528,8 @@ "amd64": "https://nodejs.org/dist/v18.1.0/node-v18.1.0-x64.msi" } }, - "uninstall_name": "node*" + "uninstall_name": "node*", + "product_id": "{E82BEA0E-E1A9-4CA4-8922-5E34580A5423}" }, "node-v18.20.0": { "package_name": "node", @@ -533,7 +540,9 @@ "amd64": "https://nodejs.org/dist/v18.20.0/node-v18.20.0-x64.msi" } }, - "uninstall_name": "node*" + "uninstall_name": "node*", + "product_id": "{ED801E31-1556-48A1-AC38-BADEF42169B0}" + }, "node-v18.20.2": { "package_name": "node", @@ -544,10 +553,11 @@ "amd64": "https://nodejs.org/dist/v18.20.2/node-v18.20.2-x64.msi" } }, - "uninstall_name": "node*" + "uninstall_name": "node*", + "product_id": "{2484FA32-4508-4229-B443-7AA07AD22780}" }, "node-v19.5.0": { - "package_name": "node", + "package_name": "Node.js", "package_version": "19.5.0", "CVE": [ "CVE-2023-23936", @@ -559,10 +569,11 @@ "amd64": "https://nodejs.org/dist/v19.5.0/node-v19.5.0-x64.msi" } }, - "uninstall_name": "node*" + "uninstall_name": "node*", + "product_id": "{60979FBD-C66C-4EB9-971C-F4BF5D4DBA6D}" }, "node-v19.6.0": { - "package_name": "node", + "package_name": "Node.js", "package_version": "19.6.0", "CVE": [ "CVE-2023-23936", @@ -574,10 +585,11 @@ "amd64": "https://nodejs.org/dist/v19.6.0/node-v19.6.0-x64.msi" } }, - "uninstall_name": "node*" + "uninstall_name": "node*", + "product_id": "{25C3DFC0-0007-4456-96D8-C621037D53EB}" }, "node-v20.5.1": { - "package_name": "node", + "package_name": "Node.js", "package_version": "20.5.1", "CVE": [ "CVE-2023-44487", @@ -590,7 +602,8 @@ "amd64": "https://nodejs.org/dist/v20.5.1/node-v20.5.1-x64.msi" } }, - "uninstall_name": "node*" + "uninstall_name": "node*", + "product_id": "{5674EEF9-AA04-4DEC-9191-67D745D090A2}" }, "lynx-2.8.8": { "package_name": "lynx", diff --git a/deps/wazuh_testing/wazuh_testing/end_to_end/waiters.py b/deps/wazuh_testing/wazuh_testing/end_to_end/waiters.py index b870657c3b..101148390b 100644 --- 
a/deps/wazuh_testing/wazuh_testing/end_to_end/waiters.py +++ b/deps/wazuh_testing/wazuh_testing/end_to_end/waiters.py @@ -82,19 +82,22 @@ def wait_until_vd_is_updated(host_manager: HostManager) -> None: else: logging.info("Scanner start log not found") -def wait_until_vuln_scan_agents_finished(host_manager: HostManager) -> None: +def wait_until_vuln_scan_agents_finished(host_manager: HostManager, agent_list: list = None) -> None: """ Wait until vulnerability scans for all agents are finished. Args: host_manager (HostManager): Host manager instance to handle the environment. """ - final_timeout = VD_INITIAL_SCAN_PER_AGENT_TIMEOUT * len(get_agents_id(host_manager)) + hosts_to_wait = agent_list if agent_list else host_manager.get_group_hosts('agent') + final_timeout = VD_INITIAL_SCAN_PER_AGENT_TIMEOUT * len(hosts_to_wait) + time.sleep(final_timeout) -def wait_syscollector_and_vuln_scan(host_manager: HostManager, host: str, operation_data: Dict, - current_datetime: str = '') -> None: +def wait_syscollector_and_vuln_scan(host_manager: HostManager, syscollector_scan: int, + greater_than_timestamp: str = '', + agent_list: list = None) -> None: """ Wait until syscollector and vulnerability scans are finished for a specific host. @@ -104,25 +107,18 @@ def wait_syscollector_and_vuln_scan(host_manager: HostManager, host: str, opera operation_data (Dict): Dictionary with the operation data. current_datetime (str): Current datetime to use in the operation. 
""" - logging.info(f"Waiting for syscollector scan to finish on {host}") - - timeout_syscollector_scan = TIMEOUT_SYSCOLLECTOR_SHORT_SCAN if 'timeout_syscollector_scan' not in \ - operation_data else operation_data['timeout_syscollector_scan'] + logging.info(f"Waiting for syscollector scan to finish in all hosts") + hosts_to_wait = agent_list if agent_list else host_manager.get_group_hosts('agent') # Wait until syscollector monitoring_data = generate_monitoring_logs(host_manager, [get_event_regex({'event': 'syscollector_scan_start'}), get_event_regex({'event': 'syscollector_scan_end'})], - [timeout_syscollector_scan, timeout_syscollector_scan], - host_manager.get_group_hosts('agent'), - greater_than_timestamp=current_datetime) - - truncate_remote_host_group_files(host_manager, host_manager.get_group_hosts('agent')) + [syscollector_scan, syscollector_scan], + hosts_to_wait, greater_than_timestamp=greater_than_timestamp) monitoring_events_multihost(host_manager, monitoring_data, ignore_timeout_error=False) - logging.info(f"Waiting for vulnerability scan to finish on {host}") - - wait_until_vuln_scan_agents_finished(host_manager) + logging.info(f"Waiting for vulnerability scan to finish") - logging.info(f"Checking agent vulnerability on {host}") + wait_until_vuln_scan_agents_finished(host_manager, agent_list=agent_list) diff --git a/deps/wazuh_testing/wazuh_testing/tools/system.py b/deps/wazuh_testing/wazuh_testing/tools/system.py index 93829ac8a4..254a5b387d 100644 --- a/deps/wazuh_testing/wazuh_testing/tools/system.py +++ b/deps/wazuh_testing/wazuh_testing/tools/system.py @@ -3,21 +3,23 @@ # This program is free software; you can redistribute it and/or modify it under the terms of GPLv2 import json -import tempfile -import sys -import os import logging +import os +import sys +import tempfile import xml.dom.minidom as minidom -from typing import Union, List +from threading import Thread +from typing import List, Union + import testinfra import yaml - -from 
wazuh_testing.tools import WAZUH_CONF, WAZUH_API_CONF, API_LOG_FILE_PATH, WAZUH_LOCAL_INTERNAL_OPTIONS -from wazuh_testing.tools.configuration import set_section_wazuh_conf from ansible.inventory.manager import InventoryManager from ansible.parsing.dataloader import DataLoader from ansible.vars.manager import VariableManager +from wazuh_testing.tools import (API_LOG_FILE_PATH, WAZUH_API_CONF, WAZUH_CONF, + WAZUH_LOCAL_INTERNAL_OPTIONS) +from wazuh_testing.tools.configuration import set_section_wazuh_conf logger = logging.getLogger('testinfra') logger.setLevel(logging.CRITICAL) @@ -475,7 +477,7 @@ def download_file(self, host, url, dest_path, mode='755'): return result - def install_package(self, host, url, system='ubuntu'): + def install_package(self, host, url, system): """ Installs a package on the specified host. @@ -491,8 +493,8 @@ def install_package(self, host, url, system='ubuntu'): Example: host_manager.install_package('my_host', 'http://example.com/package.deb', system='ubuntu') """ - result = False extension = '.msi' + result = None if system == 'windows': if url.lower().endswith(extension): @@ -501,8 +503,6 @@ def install_package(self, host, url, system='ubuntu'): result = self.get_host(host).ansible("win_package", f"path={url} arguments=/S", check=False) elif system == 'ubuntu': result = self.get_host(host).ansible("apt", f"deb={url}", check=False) - if result['changed'] and result['stderr'] == '': - result = True elif system == 'centos': result = self.get_host(host).ansible("yum", f"name={url} state=present " 'sslverify=false disable_gpg_check=True', check=False) @@ -511,10 +511,13 @@ def install_package(self, host, url, system='ubuntu'): result = self.get_host(host).ansible("command", f"curl -LO {url}", check=False) cmd = f"installer -pkg {package_name} -target /" result = self.get_host(host).ansible("command", cmd, check=False) + else: + raise ValueError(f"Unsupported system: {system}") logging.info(f"Package installed result {result}") - return 
result + if not (result['changed'] or result.get('rc') == 0) or not (result['changed'] or result.get('stderr', None) == ''): + raise RuntimeError(f"Failed to install package in {host}: {result}") def install_npm_package(self, host, url, system='ubuntu'): """ @@ -616,8 +619,8 @@ def remove_package(self, host, system, package_uninstall_name=None, custom_unins remove_operation_result = self.run_playbook(host, custom_uninstall_playbook) elif package_uninstall_name: if os_name == 'windows': - remove_operation_result = self.get_host(host).ansible("win_command", - f"{package_uninstall_name} /uninstall /quiet /S", + remove_operation_result = self.get_host(host).ansible("win_package", + f"product_id={package_uninstall_name} state=absent", check=False) elif os_name == 'linux': os = self.get_host_variables(host)['os'].split('_')[0] @@ -634,6 +637,10 @@ def remove_package(self, host, system, package_uninstall_name=None, custom_unins f"brew uninstall {package_uninstall_name}", check=False) + if not (remove_operation_result['changed'] or remove_operation_result.get('rc') == 0) \ + or not (remove_operation_result['changed'] or remove_operation_result.get('stderr', None) == ''): + raise RuntimeError(f"Failed to remove package in {host}: {remove_operation_result}") + logging.info(f"Package removed result {remove_operation_result}") return remove_operation_result @@ -763,7 +770,7 @@ def handle_wazuh_services(self, host, operation): return result - def control_environment(self, operation, group_list): + def control_environment(self, operation, group_list, parallel=False): """ Controls the Wazuh services on hosts in the specified groups. 
@@ -774,9 +781,20 @@ def control_environment(self, operation, group_list): Example: control_environment('restart', ['group1', 'group2']) """ - for group in group_list: - for host in self.get_group_hosts(group): - self.handle_wazuh_services(host, operation) + if parallel: + threads = [] + for group in group_list: + for host in self.get_group_hosts(group): + thread = Thread(target=self.handle_wazuh_services, args=(host, operation)) + threads.append(thread) + thread.start() + + for thread in threads: + thread.join() + else: + for group in group_list: + for host in self.get_group_hosts(group): + self.handle_wazuh_services(host, operation) def get_agents_ids(self): """ @@ -788,7 +806,9 @@ def get_agents_ids(self): Returns: str: The ID of the agent. """ - token = self.get_api_token(self.get_master()) + user, password = self.get_api_credentials() + + token = self.get_api_token(self.get_master(), user=user, password=password) agents = self.make_api_call(self.get_master(), endpoint='/agents/', token=token)['json']['data'] agents_ids = [] @@ -799,6 +819,32 @@ def get_agents_ids(self): return agents_ids + def get_api_credentials(self): + default_user = 'wazuh' + default_password = 'wazuh' + + master_variables = self.get_host_variables(self.get_master()) + + user = master_variables.get('api_user', default_user) + password = master_variables.get('api_password', default_password) + + return user, password + + def get_indexer_credentials(self): + default_user = 'admin' + default_password = 'changeme' + + try: + indexer_variables = self.get_host_variables(self.get_group_hosts('indexer')[0]) + except IndexError: + logging.critical("Indexer not found in inventory") + raise + + user = indexer_variables.get('indexer_user', default_user) + password = indexer_variables.get('indexer_password', default_password) + + return user, password + def remove_agents(self): """ Removes all the agents from the API. 
@@ -809,7 +855,10 @@ def remove_agents(self): Example: host_manager.remove_agent('my_host', 'my_agent_id') """ - token = self.get_api_token(self.get_master()) + user, password = self.get_api_credentials() + + token = self.get_api_token(self.get_master(), user=user, password=password) + agents_ids = self.get_agents_ids() result = self.make_api_call( host=self.get_master(), @@ -842,6 +891,24 @@ def get_hosts_not_reachable(self) -> List[str]: return hosts_not_reachable + def clean_agents(self, restart_managers: bool = False) -> None: + """Clean and register agents + + Args: + host_manager (HostManager): An instance of the HostManager class. + restart_managers (bool, optional): Whether to restart the managers. Defaults to False. + """ + # Restart managers and stop agents + logging.info("Stopping agents") + self.control_environment("stop", ["agent"], parallel=True) + + logging.info("Removing agents") + self.remove_agents() + + if restart_managers: + logging.info("Restarting managers") + self.control_environment("restart", ["manager"], parallel=True) + def clean_environment(host_manager, target_files): """Clears a series of files on target hosts managed by a host manager diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index d86dcfef28..b849a285ee 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -346,3 +346,21 @@ def pytest_addoption(parser): type=str, help='Ansible roles path.', ) + parser.addoption( + '--enable-modulesd-debug', + action='store_true', + default=True, + help='Enable modulesd debug mode. Default: False', + ) + parser.addoption( + '--gather-evidences-when-passed', + action='store_true', + default=True, + help='Enable gather evidences when passed. Default: False', + ) + parser.addoption( + '--enable-verbose-evidences', + action='store_true', + default=True, + help='Enable verbose evidences. 
Default: False', + ) diff --git a/tests/end_to_end/test_vulnerability_detector/cases/test_vulnerability.yaml b/tests/end_to_end/test_vulnerability_detector/cases/test_vulnerability.yaml index 91dce5f6bc..9341ac8c84 100644 --- a/tests/end_to_end/test_vulnerability_detector/cases/test_vulnerability.yaml +++ b/tests/end_to_end/test_vulnerability_detector/cases/test_vulnerability.yaml @@ -1,483 +1,298 @@ +--- - case: Installation of a vulnerable package id: install_package description: | Installation of a vulnerable package - macos: - Used Package: http-proxy 0.5.9 - npm Format - CVES: - amd64: ["CVE-2017-16014"], - arm64v8: ["CVE-2017-16014"], - windows: - Used Package: Node 17.0.1 - .msi Format - CVE: ["CVE-2022-21824", "CVE-2022-0778", "CVE-2021-44533", "CVE-2021-44532", "CVE-2021-44531", "CVE-2021-4044"], - ubuntu: - Used Packages: Grafana 8.5.5 - .deb Format - CVE: ["CVE-2023-2183", "CVE-2023-1410", "CVE-2023-0594", "CVE-2023-0507", "CVE-2022-39324", "CVE-2022-39307", "CVE-2022-39306", "CVE-2022-39229", "CVE-2022-39201", "CVE-2022-36062", "CVE-2022-35957", "CVE-2022-31130", "CVE-2022-31123", "CVE-2022-31107", "CVE-2022-31097", "CVE-2022-23552", "CVE-2022-23498"], - centos: - Used Packages: Grafana 8.5.5 - .rpm Format - CVE: ["CVE-2023-2183", "CVE-2023-1410", "CVE-2023-0594", "CVE-2023-0507", "CVE-2022-39324", "CVE-2022-39307", "CVE-2022-39306", "CVE-2022-39229", "CVE-2022-39201", "CVE-2022-36062", "CVE-2022-35957", "CVE-2022-31130", "CVE-2022-31123", "CVE-2022-31107", "CVE-2022-31097", "CVE-2022-23552", "CVE-2022-23498"], preconditions: null body: - tasks: - - operation: install_package - target: agent - check: - alerts: true - state_index: true - package: - centos: - amd64: grafana-8.5.5-1 - arm64v8: grafana-8.5.5-1 - ubuntu: - amd64: grafana-8.5.5 - arm64v8: grafana-8.5.5 - windows: - amd64: node-v17.0.1 - macos: - amd64: http-proxy-0.5.9 - arm64v8: http-proxy-0.5.9 + operation: install_package + package: + centos: + amd64: grafana-8.5.5-1 + arm64v8: 
grafana-8.5.5-1 + ubuntu: + amd64: grafana-8.5.5 + arm64v8: grafana-8.5.5 + windows: + amd64: node-v17.0.1 + macos: + amd64: http-proxy-0.5.9 + arm64v8: http-proxy-0.5.9 + - case: Remove vulnerable package id: remove_package description: | Removal of a vulnerable package - macos: - Used Package: http-proxy 0.5.9 - npm Format - CVES Expected to mitigate: - ["CVE-2017-16014"], - windows: - Used Package: Node 17.0.1 - .msi Format - CVES Expected to mitigate: - ["CVE-2022-21824", "CVE-2022-0778", "CVE-2021-44533", "CVE-2021-44532", "CVE-2021-44531", "CVE-2021-4044"], - ubuntu: - Used Packages: Grafana 8.5.5 - .deb Format - CVES Expected to mitigate: - ["CVE-2023-2183", "CVE-2023-1410", "CVE-2023-0594", "CVE-2023-0507", "CVE-2022-39324", "CVE-2022-39307", "CVE-2022-39306", "CVE-2022-39229", "CVE-2022-39201", "CVE-2022-36062", "CVE-2022-35957", "CVE-2022-31130", "CVE-2022-31123", "CVE-2022-31107", "CVE-2022-31097", "CVE-2022-23552", "CVE-2022-23498"], - centos: - Used Packages: Grafana 8.5.5 - .rpm Format - CVE Expected to mitigate: - ["CVE-2023-2183", "CVE-2023-1410", "CVE-2023-0594", "CVE-2023-0507", "CVE-2022-39324", "CVE-2022-39307", "CVE-2022-39306", "CVE-2022-39229", "CVE-2022-39201", "CVE-2022-36062", "CVE-2022-35957", "CVE-2022-31130", "CVE-2022-31123", "CVE-2022-31107", "CVE-2022-31097", "CVE-2022-23552", "CVE-2022-23498"], - preconditions: null body: - tasks: - - operation: remove_package - target: agent - check: - alerts: true - state_index: true - package: - centos: - amd64: grafana-8.5.5-1 - arm64v8: grafana-8.5.5-1 - ubuntu: - amd64: grafana-8.5.5 - arm64v8: grafana-8.5.5 - windows: - amd64: node-v17.0.1 - macos: - amd64: http-proxy-0.5.9 - arm64v8: http-proxy-0.5.9 + operation: remove_package + package: + centos: + amd64: grafana-8.5.5-1 + arm64v8: grafana-8.5.5-1 + ubuntu: + amd64: grafana-8.5.5 + arm64v8: grafana-8.5.5 + windows: + amd64: node-v17.0.1 + macos: + amd64: http-proxy-0.5.9 + arm64v8: http-proxy-0.5.9 + - case: 'Upgrade: Maintain 
Vulnerability' id: upgrade_package_maintain_vulnerability description: | Upgrade of a vulnerable package which maintain vulnerability - macos: - Used Package: http-proxy 0.5.10 - npm Format - CVES: - amd64: ["CVE-2017-16014"], - arm64v8: ["CVE-2017-16014"], - windows: - Used Package: Node 17.1.0 - .msi Format - "CVE": ["CVE-2022-21824", "CVE-2022-0778", "CVE-2021-44533", "CVE-2021-44532", "CVE-2021-44531", "CVE-2021-4044"], - ubuntu: - Used Packages: Grafana 8.5.6 - .deb Format - CVE: ["CVE-2023-2183", "CVE-2023-1410", "CVE-2023-0594", "CVE-2023-0507", "CVE-2022-39324", "CVE-2022-39307", "CVE-2022-39306", "CVE-2022-39229", "CVE-2022-39201", "CVE-2022-36062", "CVE-2022-35957", "CVE-2022-31130", "CVE-2022-31123", "CVE-2022-31107", "CVE-2022-31097", "CVE-2022-23552", "CVE-2022-23498"], - centos: - Used Packages: Grafana 8.5.6 - .rpm Format - CVE: ["CVE-2023-2183", "CVE-2023-1410", "CVE-2023-0594", "CVE-2023-0507", "CVE-2022-39324", "CVE-2022-39307", "CVE-2022-39306", "CVE-2022-39229", "CVE-2022-39201", "CVE-2022-36062", "CVE-2022-35957", "CVE-2022-31130", "CVE-2022-31123", "CVE-2022-31107", "CVE-2022-31097", "CVE-2022-23552", "CVE-2022-23498"], preconditions: - tasks: - - operation: install_package - target: agent - check: - alerts: true - state_index: true - package: - centos: - amd64: grafana-8.5.5-1 - arm64v8: grafana-8.5.5-1 - ubuntu: - amd64: grafana-8.5.5 - arm64v8: grafana-8.5.5 - windows: - amd64: node-v17.0.1 - macos: - amd64: http-proxy-0.5.9 - arm64v8: http-proxy-0.5.9 + operation: install_package + package: + centos: + amd64: grafana-8.5.5-1 + arm64v8: grafana-8.5.5-1 + ubuntu: + amd64: grafana-8.5.5 + arm64v8: grafana-8.5.5 + windows: + amd64: node-v17.0.1 + macos: + amd64: http-proxy-0.5.9 + arm64v8: http-proxy-0.5.9 body: - tasks: - - operation: update_package - target: agent - check: - alerts: true - state_index: true - package: - from: - centos: - amd64: grafana-8.5.5-1 - arm64v8: grafana-8.5.5-1 - ubuntu: - amd64: grafana-8.5.5 - arm64v8: 
grafana-8.5.5 - windows: - amd64: node-v17.0.1 - macos: - amd64: http-proxy-0.5.9 - arm64v8: http-proxy-0.5.9 - to: - centos: - amd64: grafana-8.5.6-1 - arm64v8: grafana-8.5.6-1 - ubuntu: - amd64: grafana-8.5.6 - arm64v8: grafana-8.5.6 - windows: - amd64: node-v17.1.0 - macos: - amd64: http-proxy-0.5.10 - arm64v8: http-proxy-0.5.10 + operation: update_package + package: + from: + centos: + amd64: grafana-8.5.5-1 + arm64v8: grafana-8.5.5-1 + ubuntu: + amd64: grafana-8.5.5 + arm64v8: grafana-8.5.5 + windows: + amd64: node-v17.0.1 + macos: + amd64: http-proxy-0.5.9 + arm64v8: http-proxy-0.5.9 + to: + centos: + amd64: grafana-8.5.6-1 + arm64v8: grafana-8.5.6-1 + ubuntu: + amd64: grafana-8.5.6 + arm64v8: grafana-8.5.6 + windows: + amd64: node-v17.1.0 + macos: + amd64: http-proxy-0.5.10 + arm64v8: http-proxy-0.5.10 - case: 'Upgrade: New vulnerability ' - id: upgrade_package_maintain_add_vulnerability + id: upgrade_package_add_vulnerability description: | Upgrade of a vulnerable package which include a new vulnerability - macos: - Used Package: systeminformation 5.0.0 - npm Format - CVE: ["CVE-2021-21388", "CVE-2021-21315", "CVE-2023-42810"], - windows: - Used Package: Node 18.0.0 - .msi Format - "CVE": ["CVE-2023-44487", CVE-2023-23936", CVE-2023-38552", "CVE-2023-32559", "CVE-2023-32006", "CVE-2023-32002", "CVE-2023-30590", "CVE-2023-30589", "CVE-2023-30588", "CVE-2023-30585", "CVE-2023-30581", "CVE-2023-23920", "CVE-2023-23919", "CVE-2023-23918", "CVE-2022-43548", "CVE-2022-35256", "CVE-2022-35255", "CVE-2022-32223", "CVE-2022-32222", "CVE-2022-32215", "CVE-2022-32214", "CVE-2022-32213", "CVE-2022-32212", "CVE-2022-3786", "CVE-2022-3602"], - ubuntu: - Used Packages: Grafana 9.1.1 - .deb Format - CVE: ["CVE-2023-2183", "CVE-2023-1387", "CVE-2022-39324", "CVE-2022-39307", "CVE-2022-39306", "CVE-2022-39229", "CVE-2022-39201", "CVE-2022-36062", "CVE-2022-35957", "CVE-2022-31130", "CVE-2022-31123", "CVE-2022-23552", "CVE-2022-23498"], - centos: - Used Packages: Grafana 
9.1.1 - .rpm Format - CVE: ["CVE-2023-2183", "CVE-2023-1387", "CVE-2022-39324", "CVE-2022-39307", "CVE-2022-39306", "CVE-2022-39229", "CVE-2022-39201", "CVE-2022-36062", "CVE-2022-35957", "CVE-2022-31130", "CVE-2022-31123", "CVE-2022-23552", "CVE-2022-23498"], - preconditions: null body: - tasks: - - operation: update_package - target: agent - check: - alerts: true - state_index: true - package: - from: - centos: - amd64: grafana-8.5.6-1 - arm64v8: grafana-8.5.6-1 - ubuntu: - amd64: grafana-8.5.6 - arm64v8: grafana-8.5.6 - windows: - amd64: node-v17.1.0 - macos: - amd64: systeminformation-4.34.23 - arm64v8: systeminformation-4.34.23 - to: - centos: - amd64: grafana-9.1.1-1 - arm64v8: grafana-9.1.1-1 - ubuntu: - amd64: grafana-9.1.1 - arm64v8: grafana-9.1.1 - windows: - amd64: node-v18.0.0 - macos: - amd64: systeminformation-5.0.0 - arm64v8: systeminformation-5.0.0 + operation: update_package + package: + from: + centos: + amd64: grafana-8.5.6-1 + arm64v8: grafana-8.5.6-1 + ubuntu: + amd64: grafana-8.5.6 + arm64v8: grafana-8.5.6 + windows: + amd64: node-v17.1.0 + macos: + amd64: systeminformation-4.34.23 + arm64v8: systeminformation-4.34.23 + to: + centos: + amd64: grafana-9.1.1-1 + arm64v8: grafana-9.1.1-1 + ubuntu: + amd64: grafana-9.1.1 + arm64v8: grafana-9.1.1 + windows: + amd64: node-v18.0.0 + macos: + amd64: systeminformation-5.0.0 + arm64v8: systeminformation-5.0.0 - case: 'Upgrade: Maintain and new vulnerability ' id: upgrade_package_maintain_add_vulnerability description: > - Upgrade of a vulnerable package which maintain vulnerabilities and include - new ones - - macos: - Used Package: systeminformation 5.0.0 - npm Format - "CVE": ["CVE-2021-21388", "CVE-2021-21315", "CVE-2023-42810"], - windows: - Used Package: Node 18.1.0 - .msi Format - "CVE": ["CVE-2023-44487, CVE-2023-23936, CVE-2023-30589, CVE-2023-38552", "CVE-2023-32559", "CVE-2023-32006", "CVE-2023-32002", "CVE-2023-30590", "CVE-2023-30588", "CVE-2023-30585", "CVE-2023-30581", "CVE-2023-23920", 
"CVE-2023-23919", "CVE-2023-23918", "CVE-2022-43548", "CVE-2022-35256", "CVE-2022-35255", "CVE-2022-32222", "CVE-2022-32215", "CVE-2022-32214", "CVE-2022-32213", "CVE-2022-32212", "CVE-2022-3786", "CVE-2022-3602"], - ubuntu: - Used Packages: Grafana 9.2.0 - .deb Format - CVE: ["CVE-2023-3128", "CVE-2023-22462", "CVE-2023-2183", "CVE-2023-1410", "CVE-2023-1387", "CVE-2023-0594", "CVE-2023-0507", "CVE-2022-39328", "CVE-2022-39324", "CVE-2022-39307", "CVE-2022-39306", "CVE-2022-23552", "CVE-2022-23498"], - centos: - Used Packages: Grafana 9.2.0 - .rpm Format - CVE: ["CVE-2023-2183", "CVE-2023-1387", "CVE-2022-39324", "CVE-2022-39307", "CVE-2022-39306", "CVE-2022-39229", "CVE-2022-39201", "CVE-2022-36062", "CVE-2022-35957", "CVE-2022-31130", "CVE-2022-31123", "CVE-2022-23552", "CVE-2022-23498"], - preconditions: null + Upgrade of a vulnerable package which maintain vulnerabilities + and include new ones body: - tasks: - - operation: update_package - target: agent - check: - alerts: true - state_index: true - package: - from: - centos: - amd64: grafana-9.1.1-1 - arm64v8: grafana-9.1.1-1 - ubuntu: - amd64: grafana-9.1.1 - arm64v8: grafana-9.1.1 - windows: - amd64: node-v18.0.0 - macos: - amd64: systeminformation-4.34.23 - arm64v8: systeminformation-4.34.23 - to: - centos: - amd64: grafana-9.2.0-1 - arm64v8: grafana-9.2.0-1 - ubuntu: - amd64: grafana-9.2.0 - arm64v8: grafana-9.2.0 - windows: - amd64: node-v18.1.0 - macos: - amd64: systeminformation-5.0.0 - arm64v8: systeminformation-5.0.0 + operation: update_package + package: + from: + centos: + amd64: grafana-9.1.1-1 + arm64v8: grafana-9.1.1-1 + ubuntu: + amd64: grafana-9.1.1 + arm64v8: grafana-9.1.1 + windows: + amd64: node-v18.0.0 + macos: + amd64: systeminformation-4.34.23 + arm64v8: systeminformation-4.34.23 + to: + centos: + amd64: grafana-9.2.0-1 + arm64v8: grafana-9.2.0-1 + ubuntu: + amd64: grafana-9.2.0 + arm64v8: grafana-9.2.0 + windows: + amd64: node-v18.1.0 + macos: + amd64: systeminformation-5.0.0 + arm64v8: 
systeminformation-5.0.0 - case: 'Upgrade: Cease vulnerability' id: upgrade_package_remove_vulnerability description: | Upgrade of a vulnerable which cease to be vulnerable - macos: - Used Package: http-proxy 0.7.0 - npm Format - "CVE": [], - windows: - Used Package: Node 18.20.0 - .msi Format - "CVE": [], - ubuntu: - Used Packages: Grafana 9.4.17 - .deb Format - CVE: [], - centos: - Used Packages: Grafana 9.4.17 - .rpm Format - CVE: [], - preconditions: null body: - tasks: - - operation: update_package - target: agent - check: - alerts: true - state_index: true - package: - from: - centos: - amd64: grafana-9.2.0-1 - arm64v8: grafana-9.2.0-1 - ubuntu: - amd64: grafana-9.2.0 - arm64v8: grafana-9.2.0 - windows: - amd64: node-v18.1.0 - macos: - amd64: http-proxy-0.5.10 - arm64v8: http-proxy-0.5.10 - to: - centos: - amd64: grafana-9.4.17-1 - arm64v8: grafana-9.4.17-1 - ubuntu: - arm64v8: grafana-9.4.17 - amd64: grafana-9.4.17 - windows: - amd64: node-v18.20.0 - macos: - amd64: http-proxy-0.7.0 - arm64v8: http-proxy-0.7.0 + operation: update_package + package: + from: + centos: + amd64: grafana-9.2.0-1 + arm64v8: grafana-9.2.0-1 + ubuntu: + amd64: grafana-9.2.0 + arm64v8: grafana-9.2.0 + windows: + amd64: node-v18.1.0 + macos: + amd64: http-proxy-0.5.10 + arm64v8: http-proxy-0.5.10 + to: + centos: + amd64: grafana-9.4.17-1 + arm64v8: grafana-9.4.17-1 + ubuntu: + arm64v8: grafana-9.4.17 + amd64: grafana-9.4.17 + windows: + amd64: node-v18.20.0 + macos: + amd64: http-proxy-0.7.0 + arm64v8: http-proxy-0.7.0 - case: 'Upgrade: Non vulnerable to non vulnerable' id: upgrade_package_nonvulnerable_to_nonvulnerable description: | Upgrade of a non vulnerable package to non vulnerable - macos: - Used Package: http-proxy 0.7.2 - npm Format - "CVE": [], - windows: - Used Package: Node 18.20.2 - .msi Format - "CVE": [], - ubuntu: - Used Packages: Grafana 9.5.13 - .deb Format - CVE: [], - centos: - Used Packages: Grafana 9.5.13 - .rpm Format - CVE: [], - preconditions: - tasks: - - 
operation: install_package - target: agent - check: - alerts: true - state_index: true - package: - windows: - amd64: node-v18.20.0 - macos: - amd64: http-proxy-0.7.0 - arm64v8: http-proxy-0.7.0 body: - tasks: - - operation: update_package - target: agent - check: - alerts: true - state_index: true - package: - from: - centos: - amd64: grafana-9.4.17-1 - arm64v8: grafana-9.4.17-1 - ubuntu: - arm64v8: grafana-9.4.17 - amd64: grafana-9.4.17 - windows: - amd64: node-v18.20.0 - macos: - amd64: http-proxy-0.7.0 - arm64v8: http-proxy-0.7.0 - to: - centos: - amd64: grafana-9.5.13-1 - arm64v8: grafana-9.5.13-1 - ubuntu: - amd64: grafana-9.5.13 - arm64v8: grafana-9.5.13 - windows: - amd64: node-v18.20.2 - macos: - amd64: http-proxy-0.7.2 - arm64v8: http-proxy-0.7.2 + operation: update_package + package: + from: + centos: + amd64: grafana-9.4.17-1 + arm64v8: grafana-9.4.17-1 + ubuntu: + arm64v8: grafana-9.4.17 + amd64: grafana-9.4.17 + windows: + amd64: node-v18.20.0 + macos: + amd64: http-proxy-0.7.0 + arm64v8: http-proxy-0.7.0 + to: + centos: + amd64: grafana-9.5.13-1 + arm64v8: grafana-9.5.13-1 + ubuntu: + amd64: grafana-9.5.13 + arm64v8: grafana-9.5.13 + windows: + amd64: node-v18.20.2 + macos: + amd64: http-proxy-0.7.2 + arm64v8: http-proxy-0.7.2 - case: 'Upgrade: Non vulnerable to vulnerable package' id: upgrade_package_nonvulnerable_to_vulnerable description: | Upgrade to non vulnerable package to vulnerable - macos: - Used Package: luxon 3.0.0 - npm Format - "CVE": ["CVE-2022-31129"], - windows: - Used Package: Node 20.5.1 - .msi Format - "CVE": ["CVE-2023-44487", "CVE-2023-39332", "CVE-2023-39331", "CVE-2023-38552"], - ubuntu: - Used Packages: Grafana 10.0.0 - .deb Format - CVE: ["CVE-2023-4822", "CVE-2023-4399"], - centos: - Used Packages: Grafana 10.0.0 - .rpm Format - CVE: ["CVE-2023-4822", "CVE-2023-4399"], - preconditions: null body: - tasks: - - operation: update_package - target: agent - check: - alerts: true - state_index: true - package: - from: - centos: - 
amd64: grafana-9.5.13-1 - arm64v8: grafana-9.5.13-1 - ubuntu: - amd64: grafana-9.5.13 - arm64v8: grafana-9.5.13 - windows: - amd64: node-v18.20.2 - macos: - amd64: luxon-2.5.2 - arm64v8: luxon-2.5.2 - to: - centos: - amd64: grafana-10.0.0-1 - arm64v8: grafana-10.0.0-1 - ubuntu: - amd64: grafana-10.0.0 - arm64v8: grafana-10.0.0 - windows: - amd64: node-v20.5.1 - macos: - amd64: luxon-3.0.0 - arm64v8: luxon-3.0.0 + operation: update_package + package: + from: + centos: + amd64: grafana-9.5.13-1 + arm64v8: grafana-9.5.13-1 + ubuntu: + amd64: grafana-9.5.13 + arm64v8: grafana-9.5.13 + windows: + amd64: node-v18.20.2 + macos: + amd64: luxon-2.5.2 + arm64v8: luxon-2.5.2 + to: + centos: + amd64: grafana-10.0.0-1 + arm64v8: grafana-10.0.0-1 + ubuntu: + amd64: grafana-10.0.0 + arm64v8: grafana-10.0.0 + windows: + amd64: node-v20.5.1 + macos: + amd64: luxon-3.0.0 + arm64v8: luxon-3.0.0 + teardown: + operation: remove_package + package: + centos: + amd64: grafana-10.0.0-1 + arm64v8: grafana-10.0.0-1 + ubuntu: + amd64: grafana-10.0.0 + arm64v8: grafana-10.0.0 + windows: + amd64: node-v20.5.1 + macos: + amd64: luxon-3.0.0 + arm64v8: luxon-3.0.0 - case: Installation of a non vulnerable package id: install_package_non_vulnerable description: | Installation of a non vulnerable package - macos: - Used Package: http-proxy 0.7.0 - npm Format - "CVE": [], - windows: - Used Package: Node 18.20.0 - .msi Format - "CVE": [], - ubuntu: - Used Packages: Grafana 9.5.13 - .deb Format - CVE: [], - centos: - Used Packages: Grafana 9.5.13 - .rpm Format - CVE: [], - preconditions: null body: - tasks: - - operation: install_package - target: agent - check: - alerts: true - state_index: true - package: - centos: - amd64: grafana-9.5.13-1 - arm64v8: grafana-9.5.13-1 - ubuntu: - amd64: grafana-9.5.13 - arm64v8: grafana-9.5.13 - windows: - amd64: node-v18.20.0 - macos: - amd64: http-proxy-0.7.0 - arm64v8: http-proxy-0.7.0 + operation: install_package + package: + centos: + amd64: grafana-9.5.13-1 + 
arm64v8: grafana-9.5.13-1 + ubuntu: + amd64: grafana-9.5.13 + arm64v8: grafana-9.5.13 + windows: + amd64: node-v18.20.0 + macos: + amd64: http-proxy-0.7.0 + arm64v8: http-proxy-0.7.0 + - case: 'Remove: Non vulnerable package' id: remove_non_vulnerable_packge description: | Removal of a non vulnerable package - macos: - Used Package: http-proxy 0.7.0 - npm Format - "CVE": [], - windows: - Used Package: Node 18.20.0 - .msi Format - "CVE": [], - ubuntu: - Used Packages: Grafana 9.5.13 - .deb Format - CVE: [] - centos: - Used Packages: Grafana 9.5.13 - .rpm Format - CVE: [], body: - tasks: - - operation: remove_package - target: agent - check: - alerts: true - state_index: true - package: - centos: - amd64: grafana-9.5.13-1 - arm64v8: grafana-9.5.13-1 - ubuntu: - amd64: grafana-9.5.13 - arm64v8: grafana-9.5.13 - windows: - amd64: node-v18.20.0 - macos: - amd64: http-proxy-0.7.0 - arm64v8: http-proxy-0.7.0 + operation: remove_package + package: + centos: + amd64: grafana-9.5.13-1 + arm64v8: grafana-9.5.13-1 + ubuntu: + amd64: grafana-9.5.13 + arm64v8: grafana-9.5.13 + windows: + amd64: node-v18.20.0 + macos: + amd64: http-proxy-0.7.0 + arm64v8: http-proxy-0.7.0 diff --git a/tests/end_to_end/test_vulnerability_detector/cases/test_vulnerability_single_vulnerable_case.yaml b/tests/end_to_end/test_vulnerability_detector/cases/test_vulnerability_single_vulnerable_case.yaml new file mode 100644 index 0000000000..127791497f --- /dev/null +++ b/tests/end_to_end/test_vulnerability_detector/cases/test_vulnerability_single_vulnerable_case.yaml @@ -0,0 +1,32 @@ +- case: Installation of a vulnerable package + id: install_package + description: | + Installation of a vulnerable package + body: + operation: install_package + package: + centos: + amd64: grafana-8.5.5-1 + arm64v8: grafana-8.5.5-1 + ubuntu: + amd64: grafana-8.5.5 + arm64v8: grafana-8.5.5 + windows: + amd64: node-v17.0.1 + macos: + amd64: http-proxy-0.5.9 + arm64v8: http-proxy-0.5.9 + teardown: + operation: remove_package 
+ package: + centos: + amd64: grafana-8.5.5-1 + arm64v8: grafana-8.5.5-1 + ubuntu: + amd64: grafana-8.5.5 + arm64v8: grafana-8.5.5 + windows: + amd64: node-v17.0.1 + macos: + amd64: http-proxy-0.5.9 + arm64v8: http-proxy-0.5.9 diff --git a/tests/end_to_end/test_vulnerability_detector/configurations/agent.yaml b/tests/end_to_end/test_vulnerability_detector/configurations/agent.yaml index b965697a4e..1ec2928c48 100644 --- a/tests/end_to_end/test_vulnerability_detector/configurations/agent.yaml +++ b/tests/end_to_end/test_vulnerability_detector/configurations/agent.yaml @@ -18,5 +18,4 @@ - disabled: value: 'no' - interval: - value: 2m - + value: SYSCOLLECTOR_INTERVAL diff --git a/tests/end_to_end/test_vulnerability_detector/configurations/manager.yaml b/tests/end_to_end/test_vulnerability_detector/configurations/manager.yaml index 646063a0df..b855af9f9f 100644 --- a/tests/end_to_end/test_vulnerability_detector/configurations/manager.yaml +++ b/tests/end_to_end/test_vulnerability_detector/configurations/manager.yaml @@ -2,7 +2,7 @@ - section: vulnerability-detection elements: - enabled: - value: 'yes' + value: VULNERABILITY_DETECTOR_ENABLE - index-status: value: 'yes' - feed-update-interval: diff --git a/tests/end_to_end/test_vulnerability_detector/conftest.py b/tests/end_to_end/test_vulnerability_detector/conftest.py index b5aa64b32a..a242eaef7c 100644 --- a/tests/end_to_end/test_vulnerability_detector/conftest.py +++ b/tests/end_to_end/test_vulnerability_detector/conftest.py @@ -29,57 +29,178 @@ def test_example(host_manager): pass ``` """ -import pytest -import json import datetime -import os +import time +import json import logging +import os import shutil import uuid -from py.xml import html -from numpydoc.docscrape import FunctionDoc -from typing import Generator, Dict +import pytest +from numpydoc.docscrape import FunctionDoc +from py.xml import html +from typing import Dict, Generator + +from wazuh_testing.end_to_end import VD_E2E_TIMEOUT_SYSCOLLECTOR_SCAN 
+from wazuh_testing.end_to_end.vulnerability_detector import PACKAGE_VULNERABILITY_SCAN_TIME +from wazuh_testing.end_to_end.check_validators import ( + compare_expected_found_vulnerabilities, + compare_expected_found_vulnerabilities_alerts, + get_failed_operation_hosts) +from wazuh_testing.end_to_end.configuration import (backup_configurations, + restore_configuration, + save_indexer_credentials_into_keystore) +from wazuh_testing.end_to_end.indexer_api import ( + WAZUH_STATES_VULNERABILITIES_INDEXNAME, delete_index) +from wazuh_testing.end_to_end.logs import (get_hosts_alerts, get_hosts_logs, + truncate_remote_host_group_files) +from wazuh_testing.end_to_end.remote_operations_handler import ( + filter_hosts_by_os, get_expected_alerts, get_expected_index, + get_vulnerabilities_index, get_vulnerability_alerts, + launch_parallel_operations) from wazuh_testing.tools.system import HostManager -from wazuh_testing.end_to_end.remote_operations_handler import launch_parallel_operations -from wazuh_testing.end_to_end.logs import get_hosts_logs, get_hosts_alerts - STYLE_PATH = os.path.join(os.path.dirname(__file__), '../../../deps/wazuh_testing/wazuh_testing/reporting/style.css') -gather_evidences_when_passed = False +gather_evidences_when_passed = True +enable_verbose_evidences = True catalog = list() results = dict() -def collect_e2e_environment_data(test_name, host_manager) -> None: - """Collect data from the environment for the test +@pytest.fixture(scope="module", autouse=True) +def install_npm(host_manager: HostManager): + """Check and install npm if not already installed""" + + node_version = "v21.7.1" + node_package_url = f"https://nodejs.org/dist/{node_version}/node-{node_version}.pkg" + + target_os_groups = ["macos"] + + for group in target_os_groups: + for host in host_manager.get_group_hosts(group): + # Check if Node and npm is installed + logging.info(f"Checking and installing npm on {host}") + node_check_command = "PATH=/usr/local/bin:$PATH && command -v node" + 
node_check_result = host_manager.get_host(host).ansible( + "shell", + node_check_command, + become=True, + become_user="vagrant", + check=False, + ) + logging.info(f"Node check result on {host}: {node_check_result}") + # Install node if it is not already installed. + if node_check_result["rc"] != 0: + logging.info( + f"Installing Node.js and npm using package: {node_package_url}" + ) + + # Use the install_package method to handle the installation. + install_result = host_manager.install_package( + host, node_package_url, system="macos" + ) + + # Logging the result of installation attempt. + logging.info( + f"Node.js and npm installation result on {host}: {install_result}" + ) + else: + logging.info("Node.js and npm are already installed.") + + +@pytest.fixture(scope="module") +def backup_configuration(host_manager: HostManager): + hosts_configuration_backup = backup_configurations(host_manager) + + yield + + logging.error("Restoring original configuration") + restore_configuration(host_manager, hosts_configuration_backup) + + logging.error("Restarting environment") + + host_manager.control_environment("restart", ["agent"], parallel=True) + host_manager.control_environment("restart", ["manager"], parallel=True) + + +@pytest.fixture(scope="module") +def clean_environment_logs(host_manager: HostManager): + """Clean Agents and Managers logs Args: - test_name: Name of the test host_manager: An instance of the HostManager class containing information about hosts. 
+ """ - logging.info("Collecting environment data") - environment_logs = get_hosts_logs(host_manager) - environment_alerts = get_hosts_alerts(host_manager) + yield - current_dir = os.path.dirname(__file__) - vulnerability_detector_logs_dir = os.path.join(current_dir, "logs") - tests_evidences_directory = os.path.join(str(vulnerability_detector_logs_dir), str(test_name)) + logging.error("Truncate managers and agents logs") + truncate_remote_host_group_files(host_manager, "all", "logs") + + +@pytest.fixture(scope="function") +def clean_environment_logs_function(host_manager: HostManager): + """Clean Agents and Managers logs + + Args: + host_manager: An instance of the HostManager class containing information about hosts. + + """ + yield + + logging.error("Truncate managers and agents logs") + truncate_remote_host_group_files(host_manager, "all", "logs") - for host in environment_logs.keys(): - logging.info(f"Collecting logs for {host}") - host_logs_name_evidence = host + "_ossec.log" - evidence_log_file = os.path.join(tests_evidences_directory, host_logs_name_evidence) - with open(evidence_log_file, 'w') as evidence_log_file: - evidence_log_file.write(environment_logs[host]) - for host in environment_alerts.keys(): - logging.info(f"Collecting alerts for {host}") - host_alerts_name_evidence = host + "_alert.json" - evidence_alert_file = os.path.join(tests_evidences_directory, host_alerts_name_evidence) - with open(evidence_alert_file, 'w') as evidence_alert_file: - evidence_alert_file.write(environment_alerts[host]) +@pytest.fixture(scope="module") +def delete_states_vulnerability_index(host_manager: HostManager): + """Delete vulnerability index + + Args: + host_manager: An instance of the HostManager class containing information about hosts. 
+ """ + yield + logging.error("Delete vulnerability index") + delete_index(host_manager, index=WAZUH_STATES_VULNERABILITIES_INDEXNAME) + + +def collect_e2e_environment_data(logs_path, host_manager) -> None: + """Collect data from the environment for the test + + Args: + test_name: Name of the test + host_manager: An instance of the HostManager class containing information about hosts. + """ + logging.info("Collecting environment data") + try: + environment_logs = get_hosts_logs(host_manager) + environment_alerts = get_hosts_alerts(host_manager) + + tests_evidences_directory = logs_path + + for host in environment_logs.keys(): + logging.info(f"Collecting logs for {host}") + host_logs_name_evidence = host + "_ossec.log" + evidence_log_file = os.path.join(tests_evidences_directory, host_logs_name_evidence) + with open(evidence_log_file, 'w') as evidence_log_file: + evidence_log_file.write(environment_logs[host]) + + for host in environment_alerts.keys(): + logging.info(f"Collecting alerts for {host}") + host_alerts_name_evidence = host + "_alert.json" + evidence_alert_file = os.path.join(tests_evidences_directory, host_alerts_name_evidence) + with open(evidence_alert_file, 'w') as evidence_alert_file: + evidence_alert_file.write(environment_alerts[host]) + + for host in environment_logs.keys(): + logging.info(f"Collecting logs for {host}") + host_logs_name_evidence = host + "_ossec.log" + evidence_file = os.path.join(logs_path, host_logs_name_evidence) + with open(evidence_file, 'w') as evidence_file: + evidence_file.write(environment_logs[host]) + except Exception as e: + logging.critical(f"Error collecting environment data: {e}") def collect_evidences(test_name, evidences) -> None: @@ -145,6 +266,19 @@ def validate_environment(host_manager: HostManager) -> None: assert len(hosts_not_reachable) == 0, f"Hosts not reachable: {hosts_not_reachable}" +@pytest.fixture(scope="module") +def save_indexer_credentials_keystore(host_manager: HostManager) -> None: + """Save the 
Wazuh indexer username and password into the Wazuh manager keystore + + Args: + host_manager (HostManager): HostManager fixture. + """ + logging.error( + "Save the Wazuh indexer username and password into the Wazuh manager keystore" + ) + save_indexer_credentials_into_keystore(host_manager) + + @pytest.fixture(scope='function') def setup(preconditions, teardown, host_manager) -> Generator[Dict, None, None]: """Fixture for running setup and teardown operations for the specified tests case @@ -161,33 +295,56 @@ def setup(preconditions, teardown, host_manager) -> Generator[Dict, None, None]: logging.info("Running setup") result = {} + failed_agents = [] if preconditions: logging.info("Running preconditions") - result = launch_parallel_operations(preconditions['tasks'], host_manager) + target_to_ignore = [] + agents_to_check = host_manager.get_group_hosts("agent") + + if 'target_os' in preconditions: + agents_to_check = filter_hosts_by_os(host_manager, preconditions['target_os']) + target_to_ignore = list(set(host_manager.get_group_hosts('agent')) - set(agents_to_check)) + + result = launch_parallel_operations(preconditions, host_manager, target_to_ignore) logging.info(f"Preconditions finished. Results: {result}") - for host in result.keys(): - if result[host]['checks']['all_successfull'] is False: - logging.critical(f"Test failed for host {host}. 
Check logs for more information") - logging.critical(f"Evidences: {result[host]['evidences']}") + logging.info(f"Result of preconditions: {result}") + + test_timestamp = datetime.datetime.now(datetime.timezone.utc) + test_timestamp = test_timestamp.strftime("%Y-%m-%dT%H:%M:%S") + + package_data = [preconditions['package']] + + timeout_syscollector_scan = VD_E2E_TIMEOUT_SYSCOLLECTOR_SCAN + timeout_vulnerabilities_detected = len(agents_to_check) * PACKAGE_VULNERABILITY_SCAN_TIME + + time.sleep(timeout_syscollector_scan + timeout_vulnerabilities_detected) + + vulnerabilities = get_vulnerabilities_index(host_manager, agents_to_check, package_data) + vulnerabilities_from_alerts = get_vulnerability_alerts(host_manager, agents_to_check, package_data, + test_timestamp) - logging.info(f"Result of preconditions: {result}") + expected_alerts = get_expected_alerts(host_manager, agents_to_check, preconditions['operation'], + preconditions['package']) + expected_vulnerabilities = get_expected_index(host_manager, agents_to_check, preconditions['operation'], + preconditions['package']) - yield result + vuln_alerts = compare_expected_found_vulnerabilities_alerts(vulnerabilities_from_alerts, expected_alerts) + vuln_detected = compare_expected_found_vulnerabilities(vulnerabilities, expected_vulnerabilities) + operation_result = get_failed_operation_hosts(result) + + failed_agents = vuln_alerts.get('failed_agents', []) + failed_agents.extend(vuln_detected.get('failed_agents', [])) + failed_agents.extend(operation_result) + + yield list(set(failed_agents)) logging.info("Running teardown") if teardown: result = launch_parallel_operations(teardown, host_manager) - for host in result.keys(): - if result[host]['checks']['all_successfull'] is False: - logging.critical(f"Test failed for host {host}. 
Check logs for more information") - logging.critical(f"Evidences: {result[host]['evidences']}") - - logging.info(f"Result of teardown: {result}") - @pytest.fixture(scope='session', autouse=True) def handle_logs(): @@ -202,6 +359,52 @@ def handle_logs(): shutil.rmtree(logs_dir, ignore_errors=True) +@pytest.fixture(scope='session', autouse=True) +def enable_modulesd_debug(request, host_manager): + """Fixture for enabling modulesd debug mode + + Args: + request: Pytest request object + + Returns: + bool: True if modulesd debug mode is enabled, False otherwise + """ + + modulesd_debug_enabled = request.config.getoption('--enable-modulesd-debug') + + if modulesd_debug_enabled: + logging.critical("Enabling modulesd debug mode") + for manager in host_manager.get_group_hosts('manager'): + host_manager.modify_file_content(manager, '/var/ossec/etc/local_internal_options.conf', 'wazuh_modules.debug=2\n') + host_manager.control_environment('restart', ['manager'], parallel=True) + + yield + + if modulesd_debug_enabled: + logging.critical("Disabling modulesd debug mode") + for manager in host_manager.get_group_hosts('manager'): + host_manager.modify_file_content(manager, '/var/ossec/etc/local_internal_options.conf', 'wazuh_modules.debug=0\n') + host_manager.control_environment('restart', ['manager'], parallel=True) + + +@pytest.fixture(scope='session', autouse=True) +def handle_gather_evidence_when_passed(request): + """Fixture for gathering evidences when the test passed + + Args: + request: Pytest request object + """ + + global gather_evidences_when_passed + global enable_verbose_evidences + + if request.config.getoption('--gather-evidences-when-passed'): + gather_evidences_when_passed = True + + if request.config.getoption('--enable-verbose-evidences'): + enable_verbose_evidences = True + + # Configure logging @pytest.hookimpl(hookwrapper=True, tryfirst=True) def pytest_runtest_setup(item): @@ -239,18 +442,11 @@ def pytest_runtest_teardown(item, nextitem): def 
pytest_html_results_table_header(cells): - cells.insert(4, html.th('Tier', class_='sortable tier', col='tier')) - cells.insert(3, html.th('Markers')) cells.insert(2, html.th('Description')) - cells.insert(1, html.th('Time', class_='sortable time', col='time')) - def pytest_html_results_table_row(report, cells): try: - cells.insert(4, html.td(report.tier)) - cells.insert(3, html.td(report.markers)) cells.insert(2, html.td(report.description)) - cells.insert(1, html.td(datetime.utcnow(), class_='col-time')) except AttributeError: pass @@ -328,16 +524,16 @@ def pytest_runtest_makereport(item, call): logs_path = os.path.join(str(vulnerability_detector_logs_dir), item._request.node.name) if 'host_manager' in item.funcargs: - collect_e2e_environment_data(item._request.node.name, item.funcargs['host_manager']) - - if 'get_results' in item.funcargs: - test_result = item.funcargs['get_results'] + collect_e2e_environment_data(logs_path, item.funcargs['host_manager']) - if item._request.node.name in test_result and 'evidences' in test_result[item._request.node.name]: - evidences = test_result[item._request.node.name]['evidences'] - collect_evidences(item._request.node.name, evidences) - else: - logging.info(f"No evidences found for {item._request.node.name}") + if dict(item.user_properties): + test_result = dict(item.user_properties)['test_result'] + try: + test_result.collect_evidences(logs_path, enable_verbose_evidences, gather_evidences_when_passed) + except Exception as e: + logging.critical(f"Error collecting evidences: {e} for {item._request.node.name}") + else: + logging.info(f"No evidences found for {item._request.node.name}") files = [] @@ -352,7 +548,7 @@ def pytest_runtest_makereport(item, call): content = f.read() extra.append(pytest_html.extras.text(content, name=os.path.split(filepath)[-1])) except Exception as e: - logging.critical(f"Error collecting evidences: {e} for {item._request.node.name}") + logging.critical(f"Unexpected error in evidence collection: {e} 
for {item._request.node.name}") if gather_evidences_when_passed and not report.skipped: report.extra = extra diff --git a/tests/end_to_end/test_vulnerability_detector/test_vulnerability_detector.py b/tests/end_to_end/test_vulnerability_detector/test_vulnerability_detector.py index ef97ef88e5..0be278768d 100644 --- a/tests/end_to_end/test_vulnerability_detector/test_vulnerability_detector.py +++ b/tests/end_to_end/test_vulnerability_detector/test_vulnerability_detector.py @@ -17,11 +17,16 @@ Tests: - TestInitialScans: Validates the initiation of Syscollector scans across all agents in the environment. - - test_syscollector_first_scan: Validates the initiation of the first Syscollector scans across all agents in the environment. - - test_syscollector_first_scan_index: Validates that the Vulnerability Detector detects vulnerabilities within the environment in the first scan in the index. - - test_syscollector_second_scan: Validates the initiation of the second Syscollector scans across all agents in the environment. - - tests_syscollector_first_second_scan_consistency_index: Ensure the consistency of the agent's vulnerabilities between the first and second scans in index. - - TestScanSyscollectorCases: Validates the Vulnerability Detector's ability to detect new vulnerabilities in the environment for each of the defined cases. + - test_syscollector_first_scan: Validates the initiation of the first Syscollector scans across all agents + in the environment. + - test_syscollector_first_scan_index: Validates that the Vulnerability Detector detects vulnerabilities + within the environment in the first scan in the index. + - test_syscollector_second_scan: Validates the initiation of the second Syscollector scans + across all agents in the environment. + - tests_syscollector_first_second_scan_consistency_index: Ensure the consistency of the agent's vulnerabilities + between the first and second scans in index. 
+ - TestScanSyscollectorCases: Validates the Vulnerability Detector's ability to detect new vulnerabilities in the + environment for each of the defined cases. Issue: https://github.com/wazuh/wazuh-qa/issues/4369 @@ -38,185 +43,219 @@ - vulnerability_detector - tier0 """ -import os -import pytest +import datetime import logging -import yaml +import os import time -import ast -import datetime -from typing import Generator -from wazuh_testing.end_to_end.configuration import backup_configurations, restore_configuration, \ - configure_environment, save_indexer_credentials_into_keystore -from wazuh_testing.end_to_end.logs import truncate_remote_host_group_files +import pytest +from wazuh_testing.end_to_end import Check, Evidence, TestResult, VD_E2E_TIMEOUT_SYSCOLLECTOR_SCAN +from wazuh_testing.end_to_end.check_validators import ( + compare_expected_found_vulnerabilities, + compare_expected_found_vulnerabilities_alerts, empty, empty_dict, equals, + equals_but_not_empty, no_errors, validate_operation_results, get_duplicated_vulnerabilities) +from wazuh_testing.end_to_end.configuration import ( + backup_configurations, change_agent_manager_ip, configure_environment, + load_vulnerability_detector_configurations, restore_configuration,) +from wazuh_testing.end_to_end.logs import check_errors_in_environment +from wazuh_testing.end_to_end.monitoring import \ + monitoring_syscollector_scan_agents +from wazuh_testing.end_to_end.remote_operations_handler import ( + get_expected_alerts, get_expected_index, get_vulnerabilities_index, + get_vulnerability_alerts, launch_parallel_operations) +from wazuh_testing.end_to_end.utils import (extract_case_info, get_case_ids, + load_test_cases) +from wazuh_testing.end_to_end.vulnerability_detector import (TIMEOUT_PER_AGENT_VULNERABILITY_FIRST_SCAN, + get_vulnerabilities_from_states_by_agent) from wazuh_testing.end_to_end.waiters import wait_until_vd_is_updated -from wazuh_testing.end_to_end.monitoring import generate_monitoring_logs, 
monitoring_events_multihost -from wazuh_testing.end_to_end.regex import get_event_regex -from wazuh_testing.end_to_end.indexer_api import get_indexer_values, delete_index, \ - create_vulnerability_states_indexer_filter, create_alerts_filter -from wazuh_testing.tools.configuration import load_configuration_template from wazuh_testing.tools.system import HostManager -from wazuh_testing.end_to_end.remote_operations_handler import launch_parallel_operations -from wazuh_testing.end_to_end.vulnerability_detector import get_vulnerabilities_from_states -from wazuh_testing.modules.syscollector import TIMEOUT_SYSCOLLECTOR_SCAN +pytestmark = [pytest.mark.e2e, pytest.mark.vulnerability_detector, pytest.mark.tier0] -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) -local_path = os.path.dirname(os.path.abspath(__file__)) -current_dir = os.path.dirname(__file__) -configurations_dir = os.path.join(current_dir, "configurations") -configurations_paths = { - 'manager': os.path.join(configurations_dir, 'manager.yaml'), - 'agent': os.path.join(configurations_dir, 'agent.yaml') - } -vulnerability_detector_logs_dir = os.path.join(current_dir, "logs") +AGENTS_SCANNED_FIRST_SCAN = [] +FIRST_SCAN_TIME = None +FIRST_SCAN_VULNERABILITIES_INDEX = {} +AGENT_REGISTRATION_TIMEOUT = 15 +PACKAGE_VULNERABILITY_SCAN_TIME = 120 -TIMEOUT_PER_AGENT_VULNERABILITY_SCAN = 200 +VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS = [ + r"Invalid ID \d{3} for the source", + r"Unable to connect to .* No connection could be made because the target machine actively refused", + r"Process locked due to agent is offline. Waiting for connection", + r"Lost connection with manager. Setting lock", + r"Unable to connect to .*Connection refused", + r"Unable to connect to .*No connection could be made because the target machine actively refused it", + r"Waiting for server reply \(not started\). 
Tried: .*Ensure that the manager version is 'v4.8.0' or higher", + r"Unable to connect to any server", +] +INITIAL_VULNERABILITIES = { + "vd_disabled_when_agents_registration": {}, + "vd_enabled_when_agents_registration": {}, +} -def load_vulnerability_detector_configurations(host_manager): - """Return the configurations for Vulnerability testing for the agent and manager roles +def configure_vulnerability_detection_and_wait_until_feeds_are_updated( + host_manager: HostManager, vulnerability_detection_enabled: bool +) -> None: + """Configure the environment with the default Vulnerability Detection configuration. - Return: - Dict: Configurations for each role + Args: + host_manager (HostManager): Instance of the HostManager class. + vulnerability_detection_enabled (bool): True if the vulnerability detection is enabled, False otherwise """ - configurations = {} - - for host in host_manager.get_group_hosts('all'): - if host in host_manager.get_group_hosts('agent'): - configurations[host] = load_configuration_template(configurations_paths['agent'], [{}], [{}]) - elif host in host_manager.get_group_hosts('manager'): - configuration_template = load_configuration_template(configurations_paths['manager'], [{}], [{}]) - - # Replace placeholders by real values - manager_index = host_manager.get_group_hosts('manager').index(host) + 2 - indexer_server = host_manager.get_group_hosts('indexer')[0] - indexer_server_variables = host_manager.get_host_variables(indexer_server) - configuration_variables = { - 'INDEXER_SERVER': indexer_server_variables['ip'], - 'FILEBEAT_ROOT_CA': '/etc/pki/filebeat/root-ca.pem', - 'FILEBEAT_CERTIFICATE': f"/etc/pki/filebeat/node-{manager_index}.pem", - 'FILEBEAT_KEY': f"/etc/pki/filebeat/node-{manager_index}-key.pem" - } - configuration_template_str = str(configuration_template) - - for key, value in configuration_variables.items(): - configuration_template_str = configuration_template_str.replace(key, value) - - configurations[host] = 
ast.literal_eval(configuration_template_str) - - return configurations - -@pytest.fixture(scope='module', autouse=True) -def install_npm(host_manager: HostManager): - """Check and install npm if not already installed""" - - node_version = "v21.7.1" - node_package_url = f"https://nodejs.org/dist/{node_version}/node-{node_version}.pkg" - - target_os_groups = ['macos'] - - for group in target_os_groups: - for host in host_manager.get_group_hosts(group): - # Check if Node and npm is installed - logger.info(f"Checking and installing npm on {host}") - node_check_command = "PATH=/usr/local/bin:$PATH && command -v node" - node_check_result = host_manager.get_host(host).ansible( - "shell", - node_check_command, - become=True, - become_user='vagrant', - check=False - ) - logger.info(f"Node check result on {host}: {node_check_result}") - # Install node if it is not already installed. - if node_check_result['rc'] != 0: - logger.info(f"Installing Node.js and npm using package: {node_package_url}") + logging.error( + f"Configuring the environment: Vulnerability Detection Enabled: {vulnerability_detection_enabled}" + ) + current_dir = os.path.dirname(__file__) + configurations_dir = os.path.join(current_dir, "configurations") + configurations_paths = { + "manager": os.path.join(configurations_dir, "manager.yaml"), + "agent": os.path.join(configurations_dir, "agent.yaml"), + } + + configure_environment( + host_manager, + load_vulnerability_detector_configurations( + host_manager, + configurations_paths=configurations_paths, + enable=vulnerability_detection_enabled, + ), + ) + + logging.error("Restarting managers") + host_manager.control_environment("restart", ["manager"], parallel=True) - # Use the install_package method to handle the installation. 
- install_result = host_manager.install_package(host, node_package_url, system='macos') + if vulnerability_detection_enabled: + logging.error("Wait until Vulnerability Detector has update all the feeds") + wait_until_vd_is_updated(host_manager) - # Logging the result of installation attempt. - logger.info(f"Node.js and npm installation result on {host}: {install_result}") - else: - logger.info("Node.js and npm are already installed.") -@pytest.fixture(scope='module') -def setup_vulnerability_tests(host_manager: HostManager) -> Generator: - """Setup the vulnerability tests environment +def start_agent_and_wait_until_connected(host_manager: HostManager) -> None: + """Start agents and wait until they are connected Args: - host_manager (HostManager): An instance of the HostManager class. + host_manager (HostManager): HostManager fixture """ - logger.error("Init setup of environment") + logging.error("Starting agents") + host_manager.control_environment("restart", ["agent"], parallel=True) - # Configure managers and agents - logger.error("Getting backup of current configurations") - hosts_configuration_backup = backup_configurations(host_manager) + logging.error("Wait until agents are connected") + time.sleep(AGENT_REGISTRATION_TIMEOUT * len(host_manager.get_group_hosts("agent"))) - logger.error("Configuring environment") - configure_environment(host_manager, load_vulnerability_detector_configurations(host_manager)) - logger.error("Save the Wazuh indexer username and password into the Wazuh manager keystore") - save_indexer_credentials_into_keystore(host_manager) - # Truncate alerts and logs of managers and agents - logger.error("Truncate managers and agents logs") - truncate_remote_host_group_files(host_manager, 'all', 'logs') +@pytest.fixture(scope="function") +def configure_vulnerability_detection_test_environment( + host_manager: HostManager, vulnerability_detection_previously_enabled: bool +): + """Configure the test environment for Vulnerability Detection. 
- # Restart managers and stop agents - logger.error("Stopping agents") - host_manager.control_environment('stop', ['agent']) + This fixture sets up the Vulnerability Detection environment based on the provided test case. + It performs the following actions: + - Cleans agent logs + - Configures the environment with the default VD template configuration, + enabling or disabling VD depending on the test case. + - If VD needs to be previously enabled, waits for feeds to be updated. + - Starts the agents. + - If VD needs to be previously disabled, enables VD and waits for feeds. + + Args: + host_manager (HostManager): Instance of the HostManager class. + vulnerability_detection_previously_enabled (bool): Indicates whether Vulnerability Detection + was previously enabled. + + Yields: + str: Timestamp of the test. + """ + host_manager.clean_agents() utc_now_timestamp = datetime.datetime.now(datetime.timezone.utc) # Format the date and time as per the given format - test_timestamp = utc_now_timestamp.strftime("%Y-%m-%dT%H:%M:%SZ") + test_timestamp = utc_now_timestamp.strftime("%Y-%m-%dT%H:%M:%S") - logger.error("Restarting managers") - host_manager.control_environment('restart', ['manager']) + configure_vulnerability_detection_and_wait_until_feeds_are_updated( + host_manager, vulnerability_detection_previously_enabled + ) - # Wait until VD is updated - logger.error("Wait until Vulnerability Detector has update all the feeds") - wait_until_vd_is_updated(host_manager) + start_agent_and_wait_until_connected(host_manager) - # Start agents - host_manager.control_environment('start', ['agent']) + if not vulnerability_detection_previously_enabled: + configure_vulnerability_detection_and_wait_until_feeds_are_updated( + host_manager, True + ) yield test_timestamp - # Truncate alerts and logs of managers and agents - logger.error("Truncate managers and agents logs") - truncate_remote_host_group_files(host_manager, 'all', 'logs') - - # Delete vulnerability index - 
logger.error("Delete vulnerability index") - delete_index(host_manager, index='wazuh-states-vulnerabilities') - - logger.error("Restoring original configuration") - restore_configuration(host_manager, hosts_configuration_backup) - - logger.error("Restarting environment") - host_manager.control_environment('restart', ['agent']) - host_manager.control_environment('restart', ['manager']) +@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning") +class TestInitialScans: + # Checks definition + all_agents_scanned_syscollector_first_scan_check = Check( + "all_agents_scanned_syscollector_first_scan", + empty, + expected_evidences=["agents_not_scanned_syscollector_first_scan"], + ) + all_agents_scanned_vulnerability_first_scan_check = Check( + "all_agents_scanned_vulnerability_first_scan", + empty, + expected_evidences=["agents_not_scanned_vulnerability_first_scan"], + ) + initial_vulnerabilities_consistent = Check( + "initial_vulnerabilities_consistent", + equals_but_not_empty, + expected_evidences=[ + "vd_disabled_when_agents_registration", + "vd_enabled_when_agents_registration", + ], + ) + vulnerabilities_consistent_first_second_scan = Check( + "vulnerabilities_consistent_first_second_scan", + equals, + expected_evidences=[ + "vulnerabilities_index_first_scan", + "vulnerabilities_index_second_scan", + ], + ) + all_agents_scanned_syscollector_second_scan_check = Check( + "all_agents_scanned_syscollector_second_scan", + empty, + expected_evidences=["agents_not_scanned_syscollector_second_scan"], + ) + no_errors_check = Check( + "no_errors", no_errors, expected_evidences=["error_level_messages"] + ) -@pytest.mark.filterwarnings('ignore::urllib3.exceptions.InsecureRequestWarning') -class TestInitialScans(): - results = {} + @pytest.fixture(scope="function") + def get_timestamp(self): + utc_now_timestamp = datetime.datetime.now(datetime.timezone.utc) + test_timestamp = utc_now_timestamp.strftime("%Y-%m-%dT%H:%M:%S") - @pytest.fixture(scope='class') - 
def get_results(self): - return self.results + return test_timestamp - def test_syscollector_first_scan(self, request, host_manager, setup_vulnerability_tests, get_results): + @pytest.mark.parametrize( + "vulnerability_detection_previously_enabled", + [False, True], + ids=[ + "vd_disabled_when_agents_registration", + "vd_enabled_when_agents_registration", + ], + ) + def test_first_syscollector_scan( + self, + request, + host_manager, + save_indexer_credentials_keystore, + vulnerability_detection_previously_enabled, + configure_vulnerability_detection_test_environment, + record_property, + clean_environment_logs, + delete_states_vulnerability_index, + ): """ - description: Validates the initiation of Syscollector scans across all agents in the environment. + description: Validates the initiation of the first Syscollector scans across all agents in the environment. - This test ensures that the Vulnerability Detector accurately detects vulnerabilities within the environment. - The Agent's Vulnerability Indexer index is expected to be updated with the detected vulnerabilities. + This test ensures that Syscollector first scans are started in all agents in the environment. 
tier: 0 @@ -225,65 +264,120 @@ def test_syscollector_first_scan(self, request, host_manager, setup_vulnerabilit - host_manager: type: fixture brief: Get the host manager of the environment - - setup_vulnerability_tests: + - vulnerability_detection_previously_enabled: type: fixture - brief: Setup the environment to proceed with the testing + brief: Whether the vulnerability detection was previously enabled + - configure_environment: + type: fixture + brief: Configure the environment with the given configurations - get_results: fixture to get the results of global class tests assertions: - - Verify that syscollector scan is started after agent start in all agents + - Verify that all agents has been scanned + """ + global AGENTS_SCANNED_FIRST_SCAN + global FIRST_SCAN_TIME + global FIRST_SCAN_VULNERABILITIES_INDEX + global INITIAL_VULNERABILITIES + + FIRST_SCAN_TIME = configure_vulnerability_detection_test_environment + + test_result = TestResult(request.node.name) + test_result.add_check(self.all_agents_scanned_syscollector_first_scan_check) + test_result.add_check(self.all_agents_scanned_vulnerability_first_scan_check) + test_result.add_check(self.no_errors_check) + + record_property("test_result", test_result) + + # Store the agents scanned by syscollector in a global variable. + # Only the last test case result is retained for comparison with the second scan. + agents_not_scanned_first_scan = monitoring_syscollector_scan_agents(host_manager, + VD_E2E_TIMEOUT_SYSCOLLECTOR_SCAN) + + AGENTS_SCANNED_FIRST_SCAN = [ + agent + for agent in host_manager.get_group_hosts("agent") + if agent not in agents_not_scanned_first_scan + ] + + test_result.validate_check( + "all_agents_scanned_syscollector_first_scan", + [ + Evidence( + "agents_not_scanned_syscollector_first_scan", + agents_not_scanned_first_scan, + ) + ], + ) + + if len(AGENTS_SCANNED_FIRST_SCAN) == 0: + logging.critical("Critical error. 
Test can not continue") + pytest.fail( + "Syscollector scan not started in any agent. Check agent logs for more information" + ) - cases: None + logging.critical("Waiting until agent all agents have been scanned.") + time.sleep(TIMEOUT_PER_AGENT_VULNERABILITY_FIRST_SCAN * len(AGENTS_SCANNED_FIRST_SCAN)) - tags: - - syscollector - - vulnerability_detector - """ - results = get_results - test_name = request.node.name - - test_result = { - 'checks': { - 'all_successfull': True, - }, - 'evidences': { - 'agents_not_scanned_first_scan': [] - } - } - - logger.critical("Monitoring syscollector first scan") - list_hosts = host_manager.get_group_hosts('agent') - monitoring_data = generate_monitoring_logs(host_manager, - [get_event_regex({'event': 'syscollector_scan_start'}), - get_event_regex({'event': 'syscollector_scan_end'})], - [TIMEOUT_SYSCOLLECTOR_SCAN, TIMEOUT_SYSCOLLECTOR_SCAN], - list_hosts) - monitoring_results = monitoring_events_multihost(host_manager, monitoring_data) - - logger.critical(f"Value of monitoring results is: {monitoring_results}") - - for agent in monitoring_results: - if monitoring_results[agent]['not_found']: - test_result['checks']['all_successfull'] = False - test_result['evidences']['agents_not_scanned_first_scan'].append(agent) - - results[test_name] = test_result - - if not test_result['checks']['all_successfull']: - logging_message = 'Syscollector scan not started in the following agents:' \ - f"{test_result['evidences']['agents_not_scanned_first_scan']}." 
- logger.critical(logging_message) - pytest.fail(logging_message) + logging.critical("Checking vulnerabilities in the index") + vuln_by_agent_index = get_vulnerabilities_from_states_by_agent( + host_manager, + AGENTS_SCANNED_FIRST_SCAN, + greater_than_timestamp=FIRST_SCAN_TIME, + ) + + # Store the vulnerabilities in the global variable to make the comparision in test_consistency_initial_scans + if not vulnerability_detection_previously_enabled: + INITIAL_VULNERABILITIES["vd_disabled_when_agents_registration"] = ( + vuln_by_agent_index + ) else: - logger.critical("All agents has been scanned") + INITIAL_VULNERABILITIES["vd_enabled_when_agents_registration"] = ( + vuln_by_agent_index + ) - def test_vulnerability_first_scan_index(self, request, host_manager, setup_vulnerability_tests, get_results): + FIRST_SCAN_VULNERABILITIES_INDEX = vuln_by_agent_index + + logging.critical( + "Checking that all agents has been scanned and generated vulnerabilities in the index" + ) + + agent_not_scanned = [] + # We expect at least one vulnerability in each agent + for agent, vulnerabilities in vuln_by_agent_index.items(): + if len(vulnerabilities) == 0: + logging.critical(f"No vulnerabilities found for {agent}") + agent_not_scanned.append(agent) + + # Validate that all agents has been scanned and generated vulnerabilities in the index + test_result.validate_check( + "all_agents_scanned_vulnerability_first_scan", + [ + Evidence( + "agents_not_scanned_vulnerability_first_scan", agent_not_scanned + ), + Evidence( + "vulnerabilities_index_first_scan", vuln_by_agent_index, debug=True + ), + ], + ) + + logging.critical("Checking for errors in the environment") + unexpected_errors = check_errors_in_environment( + host_manager, expected_errors=VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS + ) + + test_result.validate_check( + "no_errors", [Evidence("error_level_messages", unexpected_errors)] + ) + + assert test_result.get_test_result(), test_result.report() + + def 
test_consistency_initial_scans(self, request, record_property): """ - description: Validates that the Vulnerability Detector detects vulnerabilities within the environment in the - first scan in the index. + description: Ensure the consistency of the agent's vulnerabilities neither the agent was registered nor. - This test ensures that the Vulnerability Detector accurately detects vulnerabilities within the environment in - the index. It is assumed that provided hosts will have at least one vulnerability. + This test ensures that the agent's vulnerabilities are consistent with the initial scan. tier: 0 @@ -292,79 +386,39 @@ def test_vulnerability_first_scan_index(self, request, host_manager, setup_vulne - host_manager: type: fixture brief: Get the host manager of the environment - - setup_vulnerability_tests: - type: fixture - brief: Setup the environment to proceed with the testing - - get_results: fixture to get the results of global class tests assertions: - - Verify that all agents has been scanned - - Verify that all agents has generated vulnerabilities in the index - - cases: None - - tags: - - syscollector - - vulnerability_detector + - Verify that the number of vulnerabilities is the same between scans """ - results = get_results - test_name = request.node.name - test_result = { - 'checks': { - 'all_successfull': True, - }, - 'evidences': { - 'agents_not_detected_index_vulnerabilities': [], - 'vulnerabilities_index_first_scan': [] - } - } - - # Filter agents that has not been scanned - agents_to_check = [agent for agent in host_manager.get_group_hosts('agent') if agent not in - results['test_syscollector_first_scan']['evidences']['agents_not_scanned_first_scan']] - - if len(agents_to_check) == 0: - pytest.skip("Syscollector scan not started in any agent. 
Skipping test") - - # Check vulnerabilities in the index - logger.critical("Checking vulnerabilities in the index") - vuln_by_agent_index = {} - - time.sleep(TIMEOUT_PER_AGENT_VULNERABILITY_SCAN * len(agents_to_check)) - - for agent in agents_to_check: - filter = create_vulnerability_states_indexer_filter(agent, setup_vulnerability_tests) - agent_all_vulnerabilities = get_indexer_values(host_manager, - filter=filter, - index='wazuh-states-vulnerabilities')['hits']['hits'] - - vuln_by_agent_index[agent] = agent_all_vulnerabilities - - test_result['evidences']['vulnerabilities_index_first_scan'] = vuln_by_agent_index - - logger.critical("Checking that all agents has been scanned and generated vulnerabilities in the index") - for agent in agents_to_check: - if agent not in list(vuln_by_agent_index.keys()) or \ - len(vuln_by_agent_index[agent]) == 0: - - logger.critical(f"Agent {agent} has not been scanned. Continuing with remaining agents") - test_result['checks']['all_successfull'] = False - test_result['evidences']['agents_not_detected_index_vulnerabilities'].append(agent) - - results[test_name] = test_result - - # Store full alert index list in global results. It is needed for the next test - results['vulnerabilities_index_first_scan'] = vuln_by_agent_index - - if not test_result['checks']['all_successfull']: - logging_message = 'Some agents has not been scanned and generated vulnerabilities in the index:' \ - f"{test_result['evidences']['agents_not_detected_index_vulnerabilities']}." 
- logger.critical(logging_message) - pytest.fail(logging_message) - else: - logger.critical("All agents has been scanned and updated states index") - - def test_syscollector_second_scan(self, request, host_manager, setup_vulnerability_tests, get_results): + test_result = TestResult(request.node.name) + test_result.add_check(self.initial_vulnerabilities_consistent) + + record_property("test_result", test_result) + test_result.validate_check( + "initial_vulnerabilities_consistent", + [ + Evidence( + "vd_disabled_when_agents_registration", + INITIAL_VULNERABILITIES["vd_disabled_when_agents_registration"], + ), + Evidence( + "vd_enabled_when_agents_registration", + INITIAL_VULNERABILITIES["vd_enabled_when_agents_registration"], + ), + ], + ) + + assert test_result.get_test_result(), test_result.report() + + def test_syscollector_second_scan( + self, + request, + host_manager, + record_property, + get_timestamp, + clean_environment_logs, + delete_states_vulnerability_index, + ): """ description: Validates the initiation of the second Syscollector scans across all agents in the environment. @@ -391,302 +445,549 @@ def test_syscollector_second_scan(self, request, host_manager, setup_vulnerabili - syscollector - vulnerability_detector """ + TIMEOUT_SECOND_SCAN = 60 - results = get_results - test_name = request.node.name - test_result = { - 'checks': { - 'all_successfull': True, - }, - 'evidences': { - 'agents_syscollector_second_scan_not_started': [] - } - } - - # Filter agents that has not been scanned - agents_to_check = [agent for agent in host_manager.get_group_hosts('agent') if agent not in - results['test_syscollector_first_scan']['evidences']['agents_not_scanned_first_scan']] - - if len(agents_to_check) == 0: - pytest.skip("Syscollector scan not started in any agent. 
Skipping test") - - monitoring_data = generate_monitoring_logs(host_manager, - [get_event_regex({'event': 'syscollector_scan_start'}), - get_event_regex({'event': 'syscollector_scan_end'})], - [TIMEOUT_SYSCOLLECTOR_SCAN, TIMEOUT_SYSCOLLECTOR_SCAN], - host_manager.get_group_hosts('agent'), 2) - - monitoring_results = monitoring_events_multihost(host_manager, monitoring_data) - logger.critical(f"Value of monitoring results is: {monitoring_results}") - - logger.critical("Checking that all agents has been scanned") - for agent in monitoring_results: - if monitoring_results[agent]['not_found']: - test_result['checks']['all_successfull'] = False - test_result['evidences']['agents_syscollector_second_scan_not_started'].append(agent) - - logging.critical(f"Syscollector scan not started in the following agents:" - f"{test_result['evidences']['agents_syscollector_second_scan_not_started']}." - 'Continuing with the test') - - results[test_name] = test_result - - if not test_result['checks']['all_successfull']: - logging_message = "Syscollector scan not started in the following agents:" \ - f"{test_result['evidences']['agents_syscollector_second_scan_not_started']}." - pytest.fail(logging_message) - else: - logger.critical("Syscollector scan started in all agents") + global AGENTS_SCANNED_FIRST_SCAN - def tests_syscollector_first_second_scan_consistency_index(self, request, host_manager, setup_vulnerability_tests, - get_results): - """ - description: Ensure the consistency of the agent's vulnerabilities between the first and second scans in index. + test_result = TestResult(request.node.name) - This test ensure that alerts in the first scan are consistent with the alerts in the second scan in the index. 
+ test_result.add_check(self.all_agents_scanned_syscollector_second_scan_check) + test_result.add_check(self.no_errors_check) + test_result.add_check(self.vulnerabilities_consistent_first_second_scan) - tier: 0 + record_property("test_result", test_result) - parameters: - - request: pytest request object - - host_manager: - type: fixture - brief: Get the host manager of the environment - - setup_vulnerability_tests: - type: fixture - brief: Setup the environment to proceed with the testing - - get_results: fixture to get the results of global class tests + if len(AGENTS_SCANNED_FIRST_SCAN) == 0: + pytest.skip( + "First scan was not executed or no agent was scanned. Skipping test" + ) - assertions: - - Verify that the number of vulnerabilities is the same between scans + agents_not_scanned_second_scan = monitoring_syscollector_scan_agents( + host_manager, + VD_E2E_TIMEOUT_SYSCOLLECTOR_SCAN, + greater_than_timestamp=get_timestamp, + ) + + test_result.validate_check( + "all_agents_scanned_syscollector_second_scan", + [ + Evidence( + "agents_not_scanned_syscollector_second_scan", + agents_not_scanned_second_scan, + ) + ], + ) + + agents_to_check_vulns = [ + agent + for agent in host_manager.get_group_hosts("agent") + if agent not in agents_not_scanned_second_scan + ] + + if len(agents_to_check_vulns) == 0: + pytest.fail( + "Syscollector scan not started in any agent. 
Check agent logs for more information" + ) - cases: None + logging.critical("Waiting until agent all agents have been scanned.") + time.sleep(TIMEOUT_SECOND_SCAN) + + global FIRST_SCAN_TIME + + logging.critical("Checking vulnerabilities in the index") + vuln_by_agent_index = get_vulnerabilities_from_states_by_agent( + host_manager, agents_to_check_vulns, greater_than_timestamp=FIRST_SCAN_TIME + ) + + logging.critical( + "Checking that all agents has been scanned and generated vulnerabilities in the index" + ) + test_result.validate_check( + "vulnerabilities_consistent_first_second_scan", + [ + Evidence( + "vulnerabilities_index_first_scan", FIRST_SCAN_VULNERABILITIES_INDEX + ), + Evidence("vulnerabilities_index_second_scan", vuln_by_agent_index), + ], + ) + + unexpected_errors = check_errors_in_environment( + host_manager, + expected_errors=VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS, + greater_than_timestamp=get_timestamp, + ) + + test_result.validate_check( + "no_errors", [Evidence("error_level_messages", unexpected_errors)] + ) + + assert test_result.get_test_result(), test_result.report() + + +@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning") +class TestScanSyscollectorCases: + + # Load basic test cases + current_dir = os.path.dirname(__file__) + basic_cases_path = os.path.join(current_dir, "cases", "test_vulnerability.yaml") + cases = load_test_cases(basic_cases_path) + complete_list = extract_case_info(cases) + list_ids = get_case_ids(cases) + + # Load single vulnerable case + single_vulnerable_packages_cases_path = os.path.join( + current_dir, "cases", "test_vulnerability_single_vulnerable_case.yaml" + ) + single_vulnerable_package_case = load_test_cases( + single_vulnerable_packages_cases_path + ) + single_vulnerable_case_complete_list = extract_case_info( + single_vulnerable_package_case + ) + single_vulnerable_case_list_ids = get_case_ids(single_vulnerable_package_case) - tags: - - syscollector - - vulnerability_detector - 
""" + # Load change manager test cases + change_manager_cases_path = os.path.join( + current_dir, "cases", "test_vulnerability_single_vulnerable_case.yaml" + ) + cases_change_manager = load_test_cases(change_manager_cases_path) + test_cases_change_manager = extract_case_info(cases_change_manager) + list_ids_change_manager = get_case_ids(cases_change_manager) - results = get_results - test_name = request.node.name - test_result = { - 'checks': { - 'all_successfull': True, - }, - 'evidences': { - 'vulnerabilities_index_first_scan': results['vulnerabilities_index_first_scan'], - 'vulnerabilities_index_second_scan': [], - 'vulnerabilities_not_found_in_second_scan': [], - 'vulnerabilities_not_found_in_first_scan': [], - 'agent_not_found_in_first_scan': [], - 'agent_not_found_in_second_scan': [] - } - } - - logger.critical("Checking vulnerabilities in the second scan") - - vuln_by_agent_index_second_scan = {} - for agent in host_manager.get_group_hosts('agent'): - filter = create_vulnerability_states_indexer_filter(target_agent=agent, - greater_than_timestamp=setup_vulnerability_tests) - agent_all_vulnerabilities = get_indexer_values(host_manager, - filter=filter, - index='wazuh-states-vulnerabilities')['hits']['hits'] - - # Only is expected alert of affected vulnerabilities - vuln_by_agent_index_second_scan[agent] = agent_all_vulnerabilities - - test_result['evidences']['vulnerabilities_index_second_scan'] = vuln_by_agent_index_second_scan - - # Calculate differences between first and second scan - agent_not_found_in_first_scan = (list(set(vuln_by_agent_index_second_scan.keys()) - - set(results['vulnerabilities_index_first_scan'].keys()))) - agent_not_found_in_second_scan = (list(set(results['vulnerabilities_index_first_scan'].keys()) - - set(vuln_by_agent_index_second_scan.keys()))) - - agent_found_in_all_scans = (set(vuln_by_agent_index_second_scan.keys()) & - set(results['vulnerabilities_index_first_scan'].keys())) - - vulnerabilities_not_found_in_first_scan = {} - 
vulnerabilities_not_found_in_second_scan = {} - - for agent in agent_found_in_all_scans: - vulnerabilities_second_scan = get_vulnerabilities_from_states(vuln_by_agent_index_second_scan[agent]) - vulnerabilities_first_scan = get_vulnerabilities_from_states( - results['vulnerabilities_index_first_scan'][agent]) - - # Calculate differences between first and second scan - vulnerabilities_not_found_second_scan = (list(set(vulnerabilities_first_scan) - - set(vulnerabilities_second_scan))) - vulnerabilities_not_found_first_scan = (list(set(vulnerabilities_second_scan) - - set(vulnerabilities_first_scan))) - - # Change to dict to be able to serialize - vulnerabilities_not_found_first_scan = [vuln._asdict() for vuln in vulnerabilities_not_found_first_scan] - vulnerabilities_not_found_second_scan = [vuln._asdict() for vuln in vulnerabilities_not_found_second_scan] - - if len(vulnerabilities_not_found_second_scan) > 0: - vulnerabilities_not_found_in_second_scan[agent] = vulnerabilities_not_found_second_scan - if len(vulnerabilities_not_found_first_scan) > 0: - vulnerabilities_not_found_in_first_scan[agent] = vulnerabilities_not_found_first_scan - - # Check if agents are the same in both scans - if (len(agent_found_in_all_scans) != len(vuln_by_agent_index_second_scan) != - len(results['vulnerabilities_index_first_scan'])): - test_result['checks']['all_successfull'] = False - logging.critical("Inconsistencies found between first and second scan in the index. 
Different agents found") - if len(agent_not_found_in_first_scan) > 0: - logging.critical(f"Agents not found in first scan: {agent_not_found_in_first_scan}") - test_result['evidences']['agent_not_found_in_first_scan'] = agent_not_found_in_first_scan - if len(agent_not_found_in_second_scan) > 0: - logging.critical(f"Agents not found in second scan: {agent_not_found_in_second_scan}") - test_result['evidences']['agent_not_found_in_second_scan'] = agent_not_found_in_second_scan - - # Check if vulnerabilities are the same in both scans - if vulnerabilities_not_found_in_first_scan or vulnerabilities_not_found_in_second_scan: - test_result['checks']['all_successfull'] = False - if vulnerabilities_not_found_in_first_scan: - logging.critical(f"Vulnerabilities not found in first scan: {vulnerabilities_not_found_in_first_scan}") - test_result['evidences']['vulnerabilities_not_found_in_first_scan'] = vulnerabilities_not_found_in_first_scan - - if vulnerabilities_not_found_in_second_scan: - logging.critical("Vulnerabilities not found in second scan: " - f"{vulnerabilities_not_found_in_second_scan}") - test_result['evidences']['vulnerabilities_not_found_in_second_scan'] = vulnerabilities_not_found_in_second_scan - - results[test_name] = test_result - - if not test_result['checks']['all_successfull']: - logging_message = "Inconsistencies found between first and second scan in the index." 
\ - "Check evidences for more information" - logger.critical(logging_message) - pytest.fail(logging_message) - -# ------------------------- - - -cases = {} - -with open(os.path.join(current_dir, os.path.join('cases', 'test_vulnerability.yaml')), 'r') as cases_file: - cases = yaml.load(cases_file, Loader=yaml.FullLoader) - -complete_list = [ - ( - case['preconditions'] if 'preconditions' in case else None, - case['body'] if 'body' in case else None, - case['teardown'] if 'teardown' in case else None + no_errors_check = Check( + "no_errors", no_errors, expected_evidences=["error_level_messages"] ) - for case in cases -] -list_ids = [case['id'] for case in cases] + operation_successfull_for_all_agents = validate_operation_results + operation_successfull_for_all_agents_check = Check( + "operation_successfull_for_all_agents", + operation_successfull_for_all_agents, + expected_evidences=["operation_results"], + ) + + setup_successfull_for_all_agents_check = Check( + "setup_operation_results", + empty, + expected_evidences=["setup_failed_agents"], + ) + + # Alerts Checks + expected_vulnerability_affected_alert_check = Check( + "expected_vulnerability_affected_alert", + empty_dict, + expected_evidences=["missing_affected_alerts"], + ) + expected_vulnerability_mitigated_alert_check = Check( + "expected_vulnerability_mitigated_alert", + empty_dict, + expected_evidences=["missing_mitigated_alerts"], + ) + + # Index Checks + expected_vulnerabilities_found_in_index_check = Check( + "expected_vulnerabilities_found_in_index", + empty_dict, + expected_evidences=["missing_vulnerabilities"], + ) + no_unexpected_vulnerabilities_found_in_index_check = Check( + "no_unexpected_vulnerabilities_found_in_index", + empty_dict, + expected_evidences=["unexpected_vulnerabilities"], + ) + + no_duplicated_vulnerabilities_check = Check('no_duplicated_vulnerabilities', empty, + expected_evidences=['duplicated_vulnerabilities']) + + @pytest.fixture(scope="function") + def 
permutate_agents_managers(self, host_manager): + backup_configuration = backup_configurations(host_manager) + + list_of_managers = host_manager.get_group_hosts("manager") + + for agent in host_manager.get_group_hosts("agent"): + agent_current_manager = host_manager.get_host_variables(agent)["manager"] + manager_index = list_of_managers.index(agent_current_manager) + next_manager = list_of_managers[(manager_index + 1) % len(list_of_managers)] + agent_os = host_manager.get_host_variables(agent)["os_name"] + manager_ip = ( + host_manager.get_host_variables(next_manager)["public_ip"] + if agent_os == "macos" + else host_manager.get_host_variables(next_manager)["ip"] + ) + + logging.error( + f"Changing manager for agent {agent} to {next_manager} ({manager_ip})" + ) + change_agent_manager_ip(host_manager, agent, manager_ip) -class TestScanSyscollectorCases(): - results = {} + yield - @pytest.fixture(scope='class') - def get_results(self): - return self.results + restore_configuration(host_manager, backup_configuration) - @pytest.mark.parametrize('preconditions, body, teardown', complete_list, ids=list_ids) - def test_vulnerability_detector_scans_cases(self, setup_vulnerability_tests, request, preconditions, body, teardown, - setup, host_manager, get_results): + @pytest.mark.parametrize("preconditions, body, teardown", single_vulnerable_case_complete_list, + ids=single_vulnerable_case_list_ids,) + def test_install_vulnerable_package_when_agent_down(self, host_manager, request, record_property, preconditions, + body, teardown, setup, clean_environment_logs, + delete_states_vulnerability_index, + clean_environment_logs_function): """ - description: Validates the Vulnerability Detector's ability to detect new vulnerabilities in the environment for each of the defined cases. + description: Install a vulnerable package when the agent is down. 
- This test evaluates the effectiveness of the Vulnerability Detector in real-world scenarios, focusing on the installation, removal, - or upgrade of various vulnerable and non-vulnerable packages in the environment. It ensures that all agents generate the expected - vulnerabilities and associated alerts. + This test ensures that the Vulnerability Detector detects the vulnerability when the agent is down. tier: 0 parameters: - - setup_vulnerability_tests: - type: fixture - brief: Setup the environment to proceed with the testing - request: pytest request object - - preconditions: - type: fixture - brief: The preconditions within the test cases, if any - - body: - type: fixture - brief: The body of the test case, which contains the tasks to be executed - - teardown: - type: fixture - brief: The teardown within the test cases, if any - - setup: - type: fixture - brief: Test setup results, to check if the hosts are setup correctly - host_manager: type: fixture brief: Get the host manager of the environment + - configure_environment: + type: fixture + brief: Configure the environment with the given configurations - get_results: fixture to get the results of global class tests assertions: - - Verify that all the hosts are properly setup. - - Verify whether vulnerabilities remain, appear or disappear, and whether alerts appear. 
- - cases: - - install_package - - remove_package - - upgrade_package_maintain_vulnerability - - upgrade_package_maintain_add_vulnerability - - upgrade_package_remove_vulnerability - - upgrade_package_nonvulnerable_to_nonvulnerable - - upgrade_package_nonvulnerable_to_vulnerable - - install_package_non_vulnerable - - remove_non_vulnerable_packge - - tags: - - syscollector - - vulnerability_detector """ - - test_name = request.node.name - - setup_results = setup - results = get_results - results[request.node.name] = {} - results['setup'] = setup_results - - hosts_to_ignore = [] - - for host in setup_results.keys(): - if setup_results[host]['checks']['all_successfull'] is False: - hosts_to_ignore.append(host) - - if len(hosts_to_ignore) > 0: - logger.critical(f"Setup test failed for hosts {hosts_to_ignore}. Check logs for more information") - logger.critical(f"Evidences: {setup_results}") - logger.critical("Ignoring these hosts for the rest of the test") - - logger.critical("Starting scan cases tests") - logger.critical(f"Case Info: {body}") - - # Launch tests tasks - test_result = launch_parallel_operations(body['tasks'], host_manager, hosts_to_ignore) - - success_for_all_agents = True - - for host in test_result.keys(): - if test_result[host]['checks']['all_successfull'] is False: - success_for_all_agents = False - logger.critical(f"Test failed for host {host}. Check logs for more information") - logger.critical(f"Evidences: {test_result[host]['evidences']}") - - results[test_name]['evidences'] = {} - - for agent in test_result.keys(): - if 'evidences' in test_result[agent]: - for evidence, evidence_values in test_result[agent]['evidences'].items(): - results[test_name]['evidences'][str(agent)+str(evidence)] = evidence_values - - if 'evidences' in test_result: - results[test_name]['evidences'] = test_result['evidences'] - - results[test_name] = test_result - - logger.critical("Final Results") - - assert success_for_all_agents is True, "Test failed. 
Check logs for more information" + # Is mandatory to launch this test along with the first scan test' + global AGENTS_SCANNED_FIRST_SCAN + if len(AGENTS_SCANNED_FIRST_SCAN) == 0: + pytest.skip("No agent was scanned in the first scan. Skipping test.") + + target_to_ignore = list( + set(host_manager.get_group_hosts("agent")) - set(AGENTS_SCANNED_FIRST_SCAN) + ) + + utc_now_timestamp = datetime.datetime.now(datetime.timezone.utc) + test_timestamp = utc_now_timestamp.strftime("%Y-%m-%dT%H:%M:%S") + + test_result = TestResult(request.node.name) + test_result.add_check(self.no_errors_check) + test_result.add_check(self.operation_successfull_for_all_agents_check) + test_result.add_check(self.expected_vulnerabilities_found_in_index_check) + test_result.add_check(self.no_unexpected_vulnerabilities_found_in_index_check) + test_result.add_check(self.no_duplicated_vulnerabilities_check) + + record_property("test_result", test_result) + + host_manager.control_environment("stop", ["agent"], parallel=True) + + # Install Vulnerable package + operations_result = launch_parallel_operations( + body, host_manager, target_to_ignore + ) + + logging.critical(f"Remote operation results: {operations_result}") + test_result.validate_check( + "operation_successfull_for_all_agents", + [Evidence("operation_results", operations_result)], + ) + + host_manager.control_environment("start", ["agent"], parallel=True) + + time.sleep(AGENT_REGISTRATION_TIMEOUT * len(AGENTS_SCANNED_FIRST_SCAN)) + time.sleep(VD_E2E_TIMEOUT_SYSCOLLECTOR_SCAN + PACKAGE_VULNERABILITY_SCAN_TIME) + + package_data = [body["package"]] + + vulnerabilities = get_vulnerabilities_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, package_data) + expected_vulnerabilities = get_expected_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, body["operation"], + body["package"]) + duplicated_vulnerabilities = get_duplicated_vulnerabilities(vulnerabilities) + + logging.critical("Validating found vulnerabilities") + 
test_result.validate_check('no_duplicated_vulnerabilities', + [Evidence('duplicated_vulnerabilities', duplicated_vulnerabilities), + Evidence('vulnerabilities', vulnerabilities) + ]) + + result = compare_expected_found_vulnerabilities(vulnerabilities, expected_vulnerabilities) + + vulnerabilities_not_found = result["vulnerabilities_not_found"] + vulnerabilities_unexpected = result["vulnerabilities_unexpected"] + + logging.critical("Validating found vulnerabilities") + test_result.validate_check( + "expected_vulnerabilities_found_in_index", + [ + Evidence("missing_vulnerabilities", vulnerabilities_not_found), + Evidence("vulnerabilities_found_in_index", vulnerabilities), + Evidence("expected_vulnerabilities", expected_vulnerabilities), + ], + ) + + logging.critical("Validating unexpected vulnerabilities") + test_result.validate_check( + "no_unexpected_vulnerabilities_found_in_index", + [ + Evidence("unexpected_vulnerabilities", vulnerabilities_unexpected), + Evidence("vulnerabilities_found_in_index", vulnerabilities), + Evidence("expected_vulnerabilities", expected_vulnerabilities), + ], + ) + + errors_environment = check_errors_in_environment( + host_manager, + expected_errors=VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS, + greater_than_timestamp=test_timestamp, + ) + test_result.validate_check("no_errors", [Evidence("error_level_messages", errors_environment)]) + + assert test_result.get_test_result(), test_result.report() + + @pytest.mark.parametrize("preconditions, body, teardown", test_cases_change_manager, + ids=list_ids_change_manager,) + def test_change_agent_manager(self, permutate_agents_managers, request, preconditions, body, + teardown, setup, host_manager, record_property, clean_environment_logs, + delete_states_vulnerability_index, clean_environment_logs_function): + # Is mandatory to launch this test along with the first scan test' + global AGENTS_SCANNED_FIRST_SCAN + if len(AGENTS_SCANNED_FIRST_SCAN) == 0: + pytest.skip("No agent was scanned in the 
first scan. Skipping test.") + + target_to_ignore = list( + set(host_manager.get_group_hosts("agent")) - set(AGENTS_SCANNED_FIRST_SCAN) + ) + + utc_now_timestamp = datetime.datetime.now(datetime.timezone.utc) + test_timestamp = utc_now_timestamp.strftime("%Y-%m-%dT%H:%M:%S") + + test_result = TestResult(request.node.name) + test_result.add_check(self.no_errors_check) + test_result.add_check(self.operation_successfull_for_all_agents_check) + test_result.add_check(self.expected_vulnerabilities_found_in_index_check) + test_result.add_check(self.no_unexpected_vulnerabilities_found_in_index_check) + test_result.add_check(self.expected_vulnerability_affected_alert_check) + test_result.add_check(self.expected_vulnerability_mitigated_alert_check) + test_result.add_check(self.no_duplicated_vulnerabilities_check) + + record_property("test_result", test_result) + + # Install Vulnerable package + operations_result = launch_parallel_operations( + body, host_manager, target_to_ignore + ) + + logging.critical(f"Remote operation results: {operations_result}") + test_result.validate_check( + "operation_successfull_for_all_agents", + [Evidence("operation_results", operations_result)], + ) + + # Wait for Syscollector and VD Scan + time.sleep(VD_E2E_TIMEOUT_SYSCOLLECTOR_SCAN + PACKAGE_VULNERABILITY_SCAN_TIME) + + package_data = [body["package"]] + + vulnerabilities = get_vulnerabilities_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, package_data) + expected_vulnerabilities = get_expected_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, body["operation"], + body["package"]) + + result = compare_expected_found_vulnerabilities(vulnerabilities, expected_vulnerabilities) + + vulnerabilities_not_found = result["vulnerabilities_not_found"] + vulnerabilities_unexpected = result["vulnerabilities_unexpected"] + duplicated_vulnerabilities = get_duplicated_vulnerabilities(vulnerabilities) + + logging.critical("Validating found vulnerabilities") + 
test_result.validate_check('no_duplicated_vulnerabilities', + [Evidence('duplicated_vulnerabilities', duplicated_vulnerabilities), + Evidence('vulnerabilities', vulnerabilities) + ]) + + test_result.validate_check( + "expected_vulnerabilities_found_in_index", + [ + Evidence("missing_vulnerabilities", vulnerabilities_not_found), + Evidence("vulnerabilities_found_in_index", vulnerabilities), + Evidence("expected_vulnerabilities", expected_vulnerabilities), + ], + ) + + logging.critical("Validating unexpected vulnerabilities") + test_result.validate_check( + "no_unexpected_vulnerabilities_found_in_index", + [ + Evidence("unexpected_vulnerabilities", vulnerabilities_unexpected), + Evidence("vulnerabilities_found_in_index", vulnerabilities), + Evidence("expected_vulnerabilities", expected_vulnerabilities), + ], + ) + + alerts = get_vulnerability_alerts( + host_manager, AGENTS_SCANNED_FIRST_SCAN, package_data, test_timestamp + ) + expected_alerts = get_expected_alerts( + host_manager, AGENTS_SCANNED_FIRST_SCAN, body["operation"], body["package"] + ) + + result_alert = compare_expected_found_vulnerabilities_alerts( + alerts, expected_alerts + ) + + logging.critical("Validating found alerts") + test_result.validate_check( + "expected_vulnerability_affected_alert", + [ + Evidence( + "missing_affected_alerts", + result_alert["vulnerabilities_affected_not_found"], + ), + Evidence("alerts_found_in_index", alerts), + Evidence("expected_alerts", expected_alerts), + ], + ) + + logging.critical("Validating unexpected alerts") + test_result.validate_check( + "expected_vulnerability_mitigated_alert", + [ + Evidence( + "missing_mitigated_alerts", + result_alert["vulnerabilities_mitigated_not_found"], + ), + Evidence("alerts_found_in_index", alerts), + Evidence("expected_alerts", expected_alerts), + ], + ) + + errors_environment = check_errors_in_environment( + host_manager, + expected_errors=VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS, + greater_than_timestamp=test_timestamp, + ) + + 
test_result.validate_check("no_errors", [Evidence("error_level_messages", errors_environment)]) + + assert test_result.get_test_result(), test_result.report() + + @pytest.mark.parametrize("preconditions, body, teardown", complete_list, ids=list_ids) + def test_vulnerability_detector_scans_cases(self, request, preconditions, body, teardown, setup, host_manager, + record_property, clean_environment_logs, + delete_states_vulnerability_index, + clean_environment_logs_function): + # It is mandatory to launch this test along with the first scan test + global AGENTS_SCANNED_FIRST_SCAN + if len(AGENTS_SCANNED_FIRST_SCAN) == 0: + pytest.skip("No agent was scanned in the first scan. Skipping test.") + + target_to_ignore = list( + set(host_manager.get_group_hosts("agent")) - set(AGENTS_SCANNED_FIRST_SCAN) + ) + + utc_now_timestamp = datetime.datetime.now(datetime.timezone.utc) + test_timestamp = utc_now_timestamp.strftime("%Y-%m-%dT%H:%M:%S") + + test_result = TestResult(request.node.name) + test_result.add_check(self.no_errors_check) + test_result.add_check(self.operation_successfull_for_all_agents_check) + test_result.add_check(self.expected_vulnerabilities_found_in_index_check) + test_result.add_check(self.no_unexpected_vulnerabilities_found_in_index_check) + test_result.add_check(self.expected_vulnerability_affected_alert_check) + test_result.add_check(self.expected_vulnerability_mitigated_alert_check) + test_result.add_check(self.setup_successfull_for_all_agents_check) + test_result.add_check(self.no_duplicated_vulnerabilities_check) + + record_property("test_result", test_result) + + agents_failed_setup = setup + test_result.validate_check("setup_successfull_for_all_agents", [Evidence("setup_failed_agents", agents_failed_setup)]) + + # Install Vulnerable package + operations_result = launch_parallel_operations( + body, host_manager, target_to_ignore + ) + + logging.critical(f"Remote operation results: {operations_result}") + test_result.validate_check( + 
"operation_successfull_for_all_agents", + [Evidence("operation_results", operations_result)], + ) + # Wait for syscollector and VD scan + time.sleep(VD_E2E_TIMEOUT_SYSCOLLECTOR_SCAN + PACKAGE_VULNERABILITY_SCAN_TIME) + + if "to" in body["package"]: + package_data = [body["package"]["to"], body["package"]["from"]] + else: + package_data = [body["package"]] + + vulnerabilities = get_vulnerabilities_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, package_data) + expected_vulnerabilities = get_expected_index(host_manager, AGENTS_SCANNED_FIRST_SCAN, + body["operation"], body["package"]) + duplicated_vulnerabilities = get_duplicated_vulnerabilities(vulnerabilities) + + result = compare_expected_found_vulnerabilities(vulnerabilities, expected_vulnerabilities) + vulnerabilities_not_found = result["vulnerabilities_not_found"] + vulnerabilities_unexpected = result["vulnerabilities_unexpected"] + + test_result.validate_check('no_duplicated_vulnerabilities', + [Evidence('duplicated_vulnerabilities', duplicated_vulnerabilities), + Evidence('vulnerabilities', vulnerabilities) + ]) + + logging.critical("Validating found vulnerabilities") + test_result.validate_check( + "expected_vulnerabilities_found_in_index", + [ + Evidence("missing_vulnerabilities", vulnerabilities_not_found), + Evidence("vulnerabilities_found_in_index", vulnerabilities), + Evidence("expected_vulnerabilities", expected_vulnerabilities), + ], + ) + + logging.critical("Validating unexpected vulnerabilities") + test_result.validate_check( + "no_unexpected_vulnerabilities_found_in_index", + [ + Evidence("unexpected_vulnerabilities", vulnerabilities_unexpected), + Evidence("vulnerabilities_found_in_index", vulnerabilities), + Evidence("expected_vulnerabilities", expected_vulnerabilities), + ], + ) + + alerts = get_vulnerability_alerts(host_manager, AGENTS_SCANNED_FIRST_SCAN, package_data, test_timestamp) + expected_alerts = get_expected_alerts( + host_manager, AGENTS_SCANNED_FIRST_SCAN, body["operation"], 
body["package"] + ) + + result_alert = compare_expected_found_vulnerabilities_alerts( + alerts, expected_alerts + ) + + logging.critical("Validating found alerts") + test_result.validate_check( + "expected_vulnerability_affected_alert", + [ + Evidence( + "missing_affected_alerts", + result_alert["vulnerabilities_affected_not_found"], + ), + Evidence("alerts_found_in_index", alerts), + Evidence("expected_alerts", expected_alerts), + ], + ) + + logging.critical("Validating unexpected alerts") + test_result.validate_check( + "expected_vulnerability_mitigated_alert", + [ + Evidence( + "missing_mitigated_alerts", + result_alert["vulnerabilities_mitigated_not_found"], + ), + Evidence("alerts_found_in_index", alerts), + Evidence("expected_alerts", expected_alerts), + ], + ) + + errors_environment = check_errors_in_environment( + host_manager, + expected_errors=VULNERABILITY_DETECTION_E2E_EXPECTED_ERRORS, + greater_than_timestamp=test_timestamp, + ) + + test_result.validate_check("no_errors", [Evidence("error_level_messages", errors_environment)]) + + assert test_result.get_test_result(), test_result.report()