From c673cc962c8bd700f621b88d9fc4afc5705807ad Mon Sep 17 00:00:00 2001 From: xin liang Date: Tue, 10 Dec 2024 18:57:31 +0800 Subject: [PATCH 1/5] Dev: Rename ocfs2.py as cluster_fs.py --- crmsh/{ocfs2.py => cluster_fs.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename crmsh/{ocfs2.py => cluster_fs.py} (100%) diff --git a/crmsh/ocfs2.py b/crmsh/cluster_fs.py similarity index 100% rename from crmsh/ocfs2.py rename to crmsh/cluster_fs.py From c4d23d82036265a9467e8234cfd5af011eee7d4c Mon Sep 17 00:00:00 2001 From: xin liang Date: Tue, 10 Dec 2024 18:59:06 +0800 Subject: [PATCH 2/5] Dev: bootstrap: add gfs2 stage functionality (Technical Preview) (jsc#PED-11809) --- crmsh/bootstrap.py | 34 +++-- crmsh/cluster_fs.py | 336 ++++++++++++++++++++++++++------------------ crmsh/ui_cluster.py | 16 ++- data-manifest | 1 - 4 files changed, 233 insertions(+), 154 deletions(-) diff --git a/crmsh/bootstrap.py b/crmsh/bootstrap.py index 21667b12b8..d57906c987 100644 --- a/crmsh/bootstrap.py +++ b/crmsh/bootstrap.py @@ -39,7 +39,7 @@ from . import userdir from .constants import QDEVICE_HELP_INFO, STONITH_TIMEOUT_DEFAULT,\ REJOIN_COUNT, REJOIN_INTERVAL, PCMK_DELAY_MAX, CSYNC2_SERVICE, WAIT_TIMEOUT_MS_DEFAULT -from . import ocfs2 +from . import cluster_fs from . import qdevice from . import parallax from . import log @@ -76,7 +76,7 @@ "/etc/samba/smb.conf", SYSCONFIG_NFS, SYSCONFIG_PCMK, SYSCONFIG_SBD, PCMK_REMOTE_AUTH, WATCHDOG_CFG, PROFILES_FILE, CRM_CFG, SBD_SYSTEMD_DELAY_START_DIR) -INIT_STAGES_EXTERNAL = ("ssh", "csync2", "corosync", "sbd", "cluster", "ocfs2", "admin", "qdevice") +INIT_STAGES_EXTERNAL = ("ssh", "csync2", "corosync", "sbd", "cluster", "ocfs2", "gfs2", "admin", "qdevice") INIT_STAGES_INTERNAL = ("csync2_remote", "qnetd_remote", "remote_auth") INIT_STAGES_ALL = INIT_STAGES_EXTERNAL + INIT_STAGES_INTERNAL JOIN_STAGES_EXTERNAL = ("ssh", "csync2", "ssh_merge", "cluster") @@ -119,6 +119,7 @@ def __init__(self): self.qdevice_heuristics_mode = None self.qdevice_rm_flag = None self.ocfs2_devices = [] + self.gfs2_devices = [] self.use_cluster_lvm2 = None self.mount_point = None self.cluster_node = None @@ -287,8 +288,8 @@ def validate_option(self): logger.warning("--no-overwrite-sshkey option is deprecated since crmsh does not overwrite ssh keys by default anymore and will be removed in future versions") if self.type == "join" and self.watchdog: logger.warning("-w option is deprecated and will be removed in future versions") - if self.ocfs2_devices or self.stage == "ocfs2": - ocfs2.OCFS2Manager.verify_ocfs2(self) + if self.ocfs2_devices or self.gfs2_devices or self.stage in ("ocfs2", "gfs2"): + cluster_fs.ClusterFSManager.pre_verify(self) if not self.skip_csync2 and self.type == "init": self.skip_csync2 = utils.get_boolean(os.getenv("SKIP_CSYNC2_SYNC")) if self.skip_csync2 and self.stage: @@ -1514,8 +1515,18 @@ def init_ocfs2(): """ if not _context.ocfs2_devices: return - ocfs2_manager = ocfs2.OCFS2Manager(_context) - ocfs2_manager.init_ocfs2() + ocfs2_manager = cluster_fs.ClusterFSManager(_context) + ocfs2_manager.init() + + +def init_gfs2(): + """ + GFS2 configure process + """ + if not _context.gfs2_devices: + return + gfs2_manager = cluster_fs.ClusterFSManager(_context) + gfs2_manager.init() def init_cluster(): @@ -2513,6 +2524,7 @@ def bootstrap_init(context): init_admin() init_qdevice() init_ocfs2() + init_gfs2() except lock.ClaimLockError as err: utils.fatal(err) @@ -2610,7 +2622,7 @@ def bootstrap_join(context): join_csync2(cluster_node, remote_user) join_ssh_merge(cluster_node, 
remote_user) probe_partitions() - join_ocfs2(cluster_node, remote_user) + join_cluster_fs(cluster_node, remote_user) join_cluster(cluster_node, remote_user) except (lock.SSHError, lock.ClaimLockError) as err: utils.fatal(err) @@ -2622,12 +2634,12 @@ def bootstrap_finished(): logger.info("Done (log saved to %s on %s)", log.CRMSH_LOG_FILE, utils.this_node()) -def join_ocfs2(peer_host, peer_user): +def join_cluster_fs(peer_host, peer_user): """ - If init node configured OCFS2 device, verify that device on join node + If init node configured OCFS2/GFS2 device, verify that device on join node """ - ocfs2_inst = ocfs2.OCFS2Manager(_context) - ocfs2_inst.join_ocfs2(peer_host) + inst = cluster_fs.ClusterFSManager(_context) + inst.join(peer_host) def remove_qdevice(): diff --git a/crmsh/cluster_fs.py b/crmsh/cluster_fs.py index 346cc5c20e..2f307242df 100644 --- a/crmsh/cluster_fs.py +++ b/crmsh/cluster_fs.py @@ -1,3 +1,4 @@ +import typing import re from contextlib import contextmanager from . import utils, sh @@ -13,162 +14,218 @@ logger_utils = log.LoggerUtils(logger) -class OCFS2Manager(object): +class Error(ValueError): + def __init__(self, msg: str): + super().__init__(msg) + + +class ClusterFSManager(object): """ - Class to manage OCFS2 and configure related resources + Class to manage cluster filesystem (OCFS2 or GFS2) + and configure related resources """ - RA_ID_PREFIX = "ocfs2-" - DLM_RA_ID = "{}dlm".format(RA_ID_PREFIX) - FS_RA_ID = "{}clusterfs".format(RA_ID_PREFIX) - LVMLOCKD_RA_ID = "{}lvmlockd".format(RA_ID_PREFIX) - LVMACTIVATE_RA_ID = "{}lvmactivate".format(RA_ID_PREFIX) - GROUP_ID = "{}group".format(RA_ID_PREFIX) - CLONE_ID = "{}clone".format(RA_ID_PREFIX) - VG_ID = "{}vg".format(RA_ID_PREFIX) - LV_ID = "{}lv".format(RA_ID_PREFIX) - - MAX_CLONE_NUM = 8 - # Note: using undocumented '-x' switch to avoid prompting if overwriting - MKFS_CMD = "mkfs.ocfs2 --cluster-stack pcmk --cluster-name {} -N {} -x {}" - HINTS_WHEN_RUNNING = """ -The cluster service has already been initialized, but the prerequisites are missing -to configure OCFS2. Please fix it and use the stage procedure to configure OCFS2 separately, -e.g. 
crm cluster init ocfs2 -o <ocfs2 device> - """ - def __init__(self, context): """ Init function """ self.ocfs2_devices = context.ocfs2_devices + self.gfs2_devices = context.gfs2_devices + self.stage = context.stage self.use_cluster_lvm2 = context.use_cluster_lvm2 self.mount_point = context.mount_point - self.use_stage = context.stage == "ocfs2" + self.use_stage = context.stage in ("ocfs2", "gfs2") self.yes_to_all = context.yes_to_all - self.cluster_name = None self.exist_ra_id_list = [] self.vg_id = None self.group_id = None + # the device that is finally mounted self.target_device = None + self.type = None + self.devices = [] - def _verify_packages(self, use_cluster_lvm2=False): + self._verify_options() + + if self.ocfs2_devices: + self.type = "OCFS2" + self.devices = self.ocfs2_devices + elif self.gfs2_devices: + self.type = "GFS2" + self.devices = self.gfs2_devices + if self.type: + prefix = self.type.lower() + self.fstype = prefix + + # Note: the following issue might not be fixed in the locally installed pacemaker version: + # https://github.com/ClusterLabs/pacemaker/pull/3766 + # so the DLM RA's id should avoid using 'gfs' as a prefix + self.DLM_RA_ID = "dlm" + self.FS_RA_ID = f"{prefix}-clusterfs" + self.LVMLOCKD_RA_ID = f"{prefix}-lvmlockd" + self.LVMACTIVATE_RA_ID = f"{prefix}-lvmactivate" + self.GROUP_ID = f"{prefix}-group" + self.CLONE_ID = f"{prefix}-clone" + self.VG_ID = f"{prefix}-vg" + self.LV_ID = f"{prefix}-lv" + + def _verify_packages(self, fs_type=None, use_cluster_lvm2=None): """ Find if missing required package """ - required_packages = ["ocfs2-tools"] - if use_cluster_lvm2: + _type = fs_type or self.type + _use_cluster_lvm2 = use_cluster_lvm2 or self.use_cluster_lvm2 + cluster_nodes = utils.list_cluster_nodes() if self.use_stage else [] + + package_requirements = { + "GFS2": ["gfs2-utils"], + "OCFS2": ["ocfs2-tools"] + } + required_packages = package_requirements.get(_type, []) + if _use_cluster_lvm2: required_packages.append("lvm2-lockd") - for pkg in required_packages: - if not utils.package_is_installed(pkg): - raise ValueError("Missing required package for configuring OCFS2: {}".format(pkg)) + + def check_packages(node=None): + for pkg in required_packages: + if not utils.package_is_installed(pkg, node): + node_info = f" on {node}" if node else "" + raise Error(f"Missing required package for configuring {_type}{node_info}: {pkg}") + + if cluster_nodes: + for node in cluster_nodes: + check_packages(node) + else: + check_packages() def _verify_options(self): """ - Verify options related with OCFS2 + Verify options related to OCFS2 and GFS2 """ - if self.use_stage and not self.ocfs2_devices: - raise ValueError("ocfs2 stage require -o option") - if len(self.ocfs2_devices) > 1 and not self.use_cluster_lvm2: - raise ValueError("Without Cluster LVM2 (-C option), -o option only support one device") - if self.use_cluster_lvm2 and not self.ocfs2_devices: - raise ValueError("-C option only valid together with -o option") + if self.gfs2_devices and self.ocfs2_devices: + raise Error("Can't use -g and -o options together") + if self.stage == "ocfs2" and not self.ocfs2_devices: + raise Error("ocfs2 stage require -o option") + if self.stage == "gfs2" and not self.gfs2_devices: + raise Error("gfs2 stage require -g option") + if self.use_cluster_lvm2: + if not self.ocfs2_devices and not self.gfs2_devices: + raise Error("-C option only valid together with -o or -g option") + else: + if len(self.ocfs2_devices) > 1: + raise Error("Without Cluster LVM2 (-C option), -o option only support one device") + elif len(self.gfs2_devices) > 1: + raise Error("Without Cluster LVM2 (-C option), -g option only support one device") if self.mount_point and utils.has_mount_point_used(self.mount_point): - raise ValueError("Mount point {} already mounted".format(self.mount_point)) + raise Error(f"Mount point {self.mount_point} already mounted")
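As a reading aid (not part of the patch), a minimal sketch of how these checks behave, using mock contexts shaped like the ones in the unit tests added later in this series:

```python
# Illustrative only: _verify_options() runs from __init__, so an invalid
# option combination raises cluster_fs.Error as soon as the manager is built.
from unittest import mock
from crmsh import cluster_fs

def make_context(**kwargs):
    defaults = dict(ocfs2_devices=[], gfs2_devices=[], stage=None,
                    use_cluster_lvm2=False, mount_point=None, yes_to_all=True)
    defaults.update(kwargs)
    return mock.Mock(**defaults)

for bad in (
        dict(ocfs2_devices=["/dev/sda1"], gfs2_devices=["/dev/sda2"]),  # -o and -g together
        dict(stage="gfs2"),                                             # gfs2 stage without -g
        dict(use_cluster_lvm2=True),                                    # -C without -o/-g
        dict(gfs2_devices=["/dev/sda1", "/dev/sda2"]),                  # two devices without -C
):
    try:
        cluster_fs.ClusterFSManager(make_context(**bad))
    except cluster_fs.Error as e:
        print(e)
```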
def _verify_devices(self): """ - Verify ocfs2 devices + Verify OCFS2/GFS2 devices """ - for dev in self.ocfs2_devices: + for dev in self.devices: if not utils.is_block_device(dev): - raise ValueError("{} doesn't look like a block device".format(dev)) + raise Error(f"{dev} doesn't look like a block device") if utils.is_dev_used_for_lvm(dev) and self.use_cluster_lvm2: - raise ValueError("{} is a Logical Volume, cannot be used with the -C option".format(dev)) + raise Error(f"{dev} is a Logical Volume, cannot be used with the -C option") if utils.has_disk_mounted(dev): - raise ValueError("{} already mounted".format(dev)) + raise Error(f"{dev} is already mounted") def _check_if_already_configured(self): """ - Check if ocfs2 related resource already configured + Check if OCFS2/GFS2 related resources are already configured """ if not self.use_stage: return out = sh.cluster_shell().get_stdout_or_raise_error("crm configure show") - if "fstype=ocfs2" in out: - logger.info("Already configured OCFS2 related resources") + if f"fstype={self.fstype}" in out: + logger.warning("Already configured %s related resources", self.type) raise utils.TerminateSubCommand - def _static_verify(self): + def _pre_verify(self): """ Verify before configuring on init process """ - self._verify_packages(self.use_cluster_lvm2) + self._verify_packages() self._check_if_already_configured() - self._verify_options() self._verify_devices() - def _dynamic_raise_error(self, error_msg): - """ - Customize error message after cluster running + @property + def error_hints_for_stage(self): + option = "-g" if self.type == "GFS2" else "-o" + hints = f""" +The cluster service has already been initialized, but the prerequisites are missing +to configure {self.type}. Please fix it and use the stage procedure to configure {self.type} separately, +e.g. crm cluster init {self.type.lower()} {option} <device> """ - raise ValueError(error_msg + ("" if self.use_stage else self.HINTS_WHEN_RUNNING)) + return "" if self.use_stage else hints - def _check_sbd_and_ocfs2_dev(self): + def _check_device_with_sbd_device(self): """ - Raise error when ocfs2 device is the same with sbd device + Raise an error when an OCFS2/GFS2 device is the same as an SBD device """ from . import sbd
if ServiceManager().service_is_enabled("sbd.service"): sbd_device_list = sbd.SBDManager.get_sbd_device_from_config() - for dev in self.ocfs2_devices: + for dev in self.devices: if dev in sbd_device_list: - self._dynamic_raise_error("{} cannot be the same with SBD device".format(dev)) + msg = f"{dev} cannot be the same with SBD device" + self.error_hints_for_stage + raise Error(msg) - def _confirm_to_overwrite_ocfs2_dev(self): + def _confirm_to_overwrite_device(self): """ - Confirm to overwrit ocfs2 device on interactive mode + Confirm to overwrite the OCFS2/GFS2 device in interactive mode """ - for dev in self.ocfs2_devices: + for dev in self.devices: msg = "" if utils.has_dev_partitioned(dev): - msg = "Found a partition table in {}".format(dev) + msg = f"Found a partition table in {dev}" else: fs_type = utils.get_dev_fs_type(dev) if fs_type: - msg = "{} contains a {} file system".format(dev, fs_type) + msg = f"{dev} contains a {fs_type} file system" - if msg and not bootstrap.confirm("{} - Proceed anyway?".format(msg)): + if msg and not bootstrap.confirm(f"{msg} - overwrite?"): raise utils.TerminateSubCommand - for dev in self.ocfs2_devices: - sh.cluster_shell().get_stdout_or_raise_error("wipefs -a {}".format(dev)) + shell = sh.cluster_shell() + for dev in self.devices: + shell.get_stdout_or_raise_error(f"wipefs -a {dev}") - def _dynamic_verify(self): + def init_verify(self): """ - Verify after cluster running + Verify on the init node after the cluster is running """ if not utils.has_stonith_running(): - self._dynamic_raise_error("OCFS2 requires stonith device configured and running") + msg = f"{self.type} requires stonith device configured and running." + self.error_hints_for_stage + raise Error(msg) - self._check_sbd_and_ocfs2_dev() - self._confirm_to_overwrite_ocfs2_dev() + self._check_device_with_sbd_device() + self._confirm_to_overwrite_device() - def _gen_ra_scripts(self, ra_type, kv): + def _gen_ra_scripts(self, ra_type: str, kv: dict) -> typing.Tuple[str, str]: """ Generate ra scripts Return id and scripts """ + if ra_type not in ra.CONFIGURE_RA_TEMPLATE_DICT: + raise Error(f"Unsupported RA type: {ra_type}") config_scripts = "" kv["id"] = utils.gen_unused_id(self.exist_ra_id_list, kv["id"]) config_scripts = ra.CONFIGURE_RA_TEMPLATE_DICT[ra_type].format(**kv) return kv["id"], config_scripts - def _mkfs(self, target): + def _mkfs(self): """ - Creating OCFS2 filesystem for the target device + Create the OCFS2/GFS2 filesystem on the target device """ - with logger_utils.status_long(" Creating OCFS2 filesystem for {}".format(target)): - self.cluster_name = corosync.get_value('totem.cluster_name') - sh.cluster_shell().get_stdout_or_raise_error(self.MKFS_CMD.format(self.cluster_name, self.MAX_CLONE_NUM, target)) + cluster_name = corosync.get_value('totem.cluster_name') + mkfs_cmd = "" + if self.type == "OCFS2": + # TODO: the -N value is fixed to 8 for now; make it configurable if needed + mkfs_cmd = f"mkfs.ocfs2 --cluster-stack pcmk --cluster-name {cluster_name} -N 8 -x {self.target_device}" + elif self.type == "GFS2": + # TODO: make sure the lock table name is really unique, if needed in the future + lock_table_name = f"{cluster_name}:FS_{utils.randomword(12)}" + # TODO: the -j value is fixed to 8 for now; make it configurable if needed + mkfs_cmd = f"mkfs.gfs2 -t {lock_table_name} -p lock_dlm -j 8 {self.target_device} -O" + logger.debug("mkfs command: %s", mkfs_cmd) + with logger_utils.status_long(f"Creating {self.fstype} filesystem on {self.target_device}"): + sh.cluster_shell().get_stdout_or_raise_error(mkfs_cmd)
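For illustration (not part of the patch), the rendered command lines _mkfs() produces for a cluster named hacluster and target device /dev/sda1; the GFS2 lock-table suffix is a made-up stand-in for utils.randomword(12):

```python
# Illustrative only: what mkfs_cmd looks like for each filesystem type.
ocfs2_cmd = "mkfs.ocfs2 --cluster-stack pcmk --cluster-name hacluster -N 8 -x /dev/sda1"
gfs2_cmd = "mkfs.gfs2 -t hacluster:FS_abcdefghijkl -p lock_dlm -j 8 /dev/sda1 -O"
```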
@contextmanager def _vg_change(self): @@ -176,36 +233,36 @@ def _vg_change(self): vgchange process using contextmanager """ shell = sh.cluster_shell() - shell.get_stdout_or_raise_error("vgchange -ay {}".format(self.vg_id)) + shell.get_stdout_or_raise_error(f"vgchange -ay {self.vg_id}") try: yield finally: - shell.get_stdout_or_raise_error("vgchange -an {}".format(self.vg_id)) + shell.get_stdout_or_raise_error(f"vgchange -an {self.vg_id}") def _create_lv(self): """ Create PV, VG, LV and return LV path """ - disks_string = ' '.join(self.ocfs2_devices) + disks_string = ' '.join(self.devices) shell = sh.cluster_shell() # Create PV - with logger_utils.status_long(" Creating PV for {}".format(disks_string)): - shell.get_stdout_or_raise_error("pvcreate {} -y".format(disks_string)) + with logger_utils.status_long(f"Creating PV for {disks_string}"): + shell.get_stdout_or_raise_error(f"pvcreate {disks_string} -y") # Create VG self.vg_id = utils.gen_unused_id(utils.get_all_vg_name(), self.VG_ID) - with logger_utils.status_long(" Creating VG {}".format(self.vg_id)): - shell.get_stdout_or_raise_error("vgcreate --shared {} {} -y".format(self.vg_id, disks_string)) + with logger_utils.status_long(f"Creating VG {self.vg_id}"): + shell.get_stdout_or_raise_error(f"vgcreate --shared {self.vg_id} {disks_string} -y") # Create LV - with logger_utils.status_long(" Creating LV {} on VG {}".format(self.LV_ID, self.vg_id)): + with logger_utils.status_long(f"Creating LV {self.LV_ID} on VG {self.vg_id}"): pe_number = utils.get_pe_number(self.vg_id) - shell.get_stdout_or_raise_error("lvcreate -l {} {} -n {} -y".format(pe_number, self.vg_id, self.LV_ID)) + shell.get_stdout_or_raise_error(f"lvcreate -l {pe_number} {self.vg_id} -n {self.LV_ID} -y") - return "/dev/{}/{}".format(self.vg_id, self.LV_ID) + return f"/dev/{self.vg_id}/{self.LV_ID}" - def _gen_group_and_clone_scripts(self, ra_list): + def _gen_group_and_clone_scripts(self, ra_list: list) -> str: """ Generate group and clone scripts """ @@ -224,7 +281,7 @@ def _gen_fs_scripts(self): fs_kv = { "id": self.FS_RA_ID, "mnt_point": self.mount_point, - "fs_type": "ocfs2", + "fs_type": self.fstype, "device": self.target_device } return self._gen_ra_scripts("Filesystem", fs_kv) @@ -246,21 +303,24 @@ def _config_dlm(self): dlm_id, dlm_scripts = self._gen_ra_scripts("DLM", {"id":self.DLM_RA_ID}) group_clone_scripts = self._gen_group_and_clone_scripts([dlm_id]) config_scripts = dlm_scripts + group_clone_scripts - self._load_append_and_wait(config_scripts, dlm_id, " Wait for DLM({}) start".format(dlm_id), need_append=False) + msg = f"Wait for DLM ({dlm_id}) start" + self._load_append_and_wait(config_scripts, dlm_id, msg, need_append=False) def _config_lvmlockd(self): """ Configure LVMLockd resource """ _id, _scripts = self._gen_ra_scripts("LVMLockd", {"id":self.LVMLOCKD_RA_ID}) - self._load_append_and_wait(_scripts, _id, " Wait for LVMLockd({}) start".format(_id)) + msg = f"Wait for LVMLockd ({_id}) start" + self._load_append_and_wait(_scripts, _id, msg) def _config_lvmactivate(self): """ Configure LVMActivate resource """ _id, _scripts = self._gen_ra_scripts("LVMActivate", {"id": self.LVMACTIVATE_RA_ID, "vgname": self.vg_id}) - self._load_append_and_wait(_scripts, _id, " Wait for LVMActivate({}) start".format(_id)) + msg = f"Wait for LVMActivate ({_id}) start" + self._load_append_and_wait(_scripts, _id, msg) def _config_fs(self): """ @@ -268,79 +328,75 @@ """ utils.mkdirp(self.mount_point) _id, 
_scripts = self._gen_fs_scripts() - self._load_append_and_wait(_scripts, _id, " Wait for Filesystem({}) start".format(_id)) + msg = f"Wait for Filesystem ({_id}) start" + self._load_append_and_wait(_scripts, _id, msg) - def _config_resource_stack_lvm2(self): - """ - Configure dlm + lvmlockd + lvm-activate + Filesystem - """ + def _configure_resource_stack(self): self._config_dlm() - self._config_lvmlockd() - self.target_device = self._create_lv() - with self._vg_change(): - self._mkfs(self.target_device) - self._config_lvmactivate() - self._config_fs() - def _config_resource_stack_ocfs2_along(self): - """ - Configure dlm + Filesystem - """ - self._config_dlm() - self.target_device = self.ocfs2_devices[0] - self._mkfs(self.target_device) + if self.use_cluster_lvm2: + # Configure dlm + lvmlockd + lvm-activate + Filesystem + self._config_lvmlockd() + self.target_device = self._create_lv() + with self._vg_change(): + self._mkfs() + self._config_lvmactivate() + else: + # Configure dlm + Filesystem + self.target_device = self.devices[0] + self._mkfs() + self._config_fs() - def init_ocfs2(self): + logger.info("%s device %s mounted on %s", self.type, self.target_device, self.mount_point) + def init(self): """ - OCFS2 configure process on init node + OCFS2/GFS2 configure process on init node """ - logger.info("Configuring OCFS2") - self._dynamic_verify() + logger.info("Configuring %s", self.type) + + self.init_verify() + + self.exist_ra_id_list = utils.all_exist_id() no_quorum_policy_value = utils.get_property("no-quorum-policy") if not no_quorum_policy_value or no_quorum_policy_value != "freeze": utils.set_property("no-quorum-policy", "freeze") - logger.info(" 'no-quorum-policy' is changed to \"freeze\"") - if self.use_cluster_lvm2: - self._config_resource_stack_lvm2() - else: - self._config_resource_stack_ocfs2_along() - logger.info(" OCFS2 device %s mounted on %s", self.target_device, self.mount_point) + self._configure_resource_stack() - def _find_target_on_join(self, peer): + def _find_target_on_join(self, peer) -> typing.Optional[dict]: """ - Find device name from OCF Filesystem param on peer node + Find the fstype and device name from the OCFS2/GFS2 Filesystem params on the peer node + Return a dict with "cluster_fs_type" and "device", or None if not found """ + fstype_pattern = r'fstype=(ocfs2|gfs2)' + device_pattern = r'device="([^"]+)"' + pattern = rf'{fstype_pattern}.*{device_pattern}|{device_pattern}.*{fstype_pattern}' out = sh.cluster_shell().get_stdout_or_raise_error("crm configure show", peer) - for line in out.splitlines(): - if "fstype=ocfs2" in line: - res = re.search("device=\"(.*?)\"", line) - if res: - return res.group(1) - else: - raise ValueError("Filesystem require configure device") + res = re.search(pattern, out) + if res: + cluster_fs_type = res.group(1) or res.group(4) + device = res.group(2) or res.group(3) + return {"cluster_fs_type": cluster_fs_type, "device": device} return None - def join_ocfs2(self, peer): + def join(self, peer): """ - Called on join process, to verify ocfs2 environment + Called during the join process, to verify the OCFS2/GFS2 environment """ - target = self._find_target_on_join(peer) - if not target: + target_dict = self._find_target_on_join(peer) + if not target_dict: return - with logger_utils.status_long("Verify OCFS2 environment"): + cluster_fs_type, device = target_dict["cluster_fs_type"], target_dict["device"] + with logger_utils.status_long(f"Verify {cluster_fs_type.upper()} environment on {device}"): use_cluster_lvm2 = xmlutil.CrmMonXmlParser(peer).is_resource_configured(constants.LVMLOCKD_RA) - self._verify_packages(use_cluster_lvm2) - if utils.is_dev_a_plain_raw_disk_or_partition(target, peer): - utils.compare_uuid_with_peer_dev([target], peer) + self._verify_packages(cluster_fs_type.upper(), use_cluster_lvm2) + if utils.is_dev_a_plain_raw_disk_or_partition(device, peer): + utils.compare_uuid_with_peer_dev([device], peer) @classmethod - def verify_ocfs2(cls, ctx): - """ - Verify OCFS2 related packages and environment - """ + def pre_verify(cls, ctx): + """ + Verify packages, options and devices before bootstrap starts + """ inst = cls(ctx) - inst._static_verify() + inst._pre_verify()
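A small standalone demo (not part of the patch) of how the alternation pattern in _find_target_on_join() resolves its four groups, whichever order fstype= and device= appear in:

```python
# Illustrative only: fstype may land in group 1 or 4, the device in group 2 or 3.
import re

fstype_pattern = r'fstype=(ocfs2|gfs2)'
device_pattern = r'device="([^"]+)"'
pattern = rf'{fstype_pattern}.*{device_pattern}|{device_pattern}.*{fstype_pattern}'

line = 'params directory="/srv/clusterfs" fstype=gfs2 device="/dev/sda6"'
res = re.search(pattern, line)
print(res.group(1) or res.group(4))  # gfs2
print(res.group(2) or res.group(3))  # /dev/sda6
```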
diff --git a/crmsh/ui_cluster.py b/crmsh/ui_cluster.py index 8070797e3b..2666651882 100644 --- a/crmsh/ui_cluster.py +++ b/crmsh/ui_cluster.py @@ -345,6 +345,7 @@ def do_init(self, context, *args): ocfs2 Configure OCFS2 (requires -o <ocfs2 device>) NOTE: this is a Technical Preview vgfs Create volume group and filesystem (ocfs2 template only, requires -o <ocfs2 device>) NOTE: this stage is an alias of ocfs2 stage + gfs2 Configure GFS2 (requires -g <gfs2 device>) NOTE: this is a Technical Preview admin Create administration virtual IP (optional) qdevice Configure qdevice and qnetd @@ -375,9 +376,15 @@ def do_init(self, context, *args): # Setup the cluster on the current node, with SBD+OCFS2 crm cluster init -s <share disk> -o <ocfs2 device> -y + # Setup the cluster on the current node, with SBD+GFS2 + crm cluster init -s <share disk> -g <gfs2 device> -y + # Setup the cluster on the current node, with SBD+OCFS2+Cluster LVM crm cluster init -s <share disk> -o <ocfs2 device1> -o <ocfs2 device2> -C -y + # Setup the cluster on the current node, with SBD+GFS2+Cluster LVM + crm cluster init -s <share disk> -g <gfs2 device1> -g <gfs2 device2> -C -y + # Add SBD on a running cluster crm cluster init sbd -s <share disk> -y @@ -392,6 +399,9 @@ def do_init(self, context, *args): # Add OCFS2+Cluster LVM on a running cluster crm cluster init ocfs2 -o <ocfs2 device1> -o <ocfs2 device2> -C -y + + # Add GFS2+Cluster LVM on a running cluster + crm cluster init gfs2 -g <gfs2 device1> -g <gfs2 device2> -C -y """, add_help=False, formatter_class=RawDescriptionHelpFormatter) parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message") @@ -449,10 +459,12 @@ def do_init(self, context, *args): help="Block device to use for SBD fencing, use \";\" as separator or -s multiple times for multi path (up to 3 devices)") storage_group.add_argument("-o", "--ocfs2-device", dest="ocfs2_devices", metavar="DEVICE", action=CustomAppendAction, default=[], help="Block device to use for OCFS2; When using Cluster LVM2 to manage the shared storage, user can specify one or multiple raw disks, use \";\" as separator or -o multiple times for multi path (must specify -C option) NOTE: this is a Technical Preview") + storage_group.add_argument("-g", "--gfs2-device", dest="gfs2_devices", metavar="DEVICE", action=CustomAppendAction, default=[], + help="Block device to use for GFS2; When using Cluster LVM2 to manage the shared storage, user can specify one or multiple raw disks, use \";\" as separator or -g multiple times for multi path (must specify -C option) NOTE: this is a Technical Preview") storage_group.add_argument("-C", "--cluster-lvm2", action="store_true", dest="use_cluster_lvm2", - help="Use Cluster LVM2 (only valid together with -o option) NOTE: this is a Technical Preview") + help="Use Cluster LVM2 (only valid together with -o or -g option) NOTE: this is a Technical Preview") storage_group.add_argument("-m", "--mount-point", dest="mount_point", metavar="MOUNT", default="/srv/clusterfs", - help="Mount point for OCFS2 device (default is /srv/clusterfs, only valid together with -o option) NOTE: this is a Technical Preview") + help="Mount point for OCFS2 or GFS2 device (default is /srv/clusterfs, only valid together with -o or -g option) NOTE: this is a Technical Preview") options, args = parse_options(parser, args) if options is None or args is None: diff --git a/data-manifest b/data-manifest index 013940bfcc..902ffb0aa0 100644 --- a/data-manifest +++ b/data-manifest @@ -198,7 +198,6 @@ test/unittests/test_gv.py test/unittests/test_handles.py test/unittests/test_lock.py test/unittests/test_objset.py -test/unittests/test_ocfs2.py test/unittests/test_parallax.py test/unittests/test_parse.py test/unittests/test_prun.py
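Between the patches, a sketch (not part of the series) of the call sequence bootstrap.py now drives for a gfs2 stage run; the mock context only carries the fields the manager reads:

```python
# Illustrative only: mirrors validate_option() -> pre_verify() and
# init_gfs2() -> ClusterFSManager(...).init() in crmsh/bootstrap.py.
from unittest import mock
from crmsh import cluster_fs

context = mock.Mock(gfs2_devices=["/dev/sdb1"], ocfs2_devices=[],
                    stage="gfs2", use_cluster_lvm2=False,
                    mount_point="/srv/clusterfs", yes_to_all=True)

cluster_fs.ClusterFSManager.pre_verify(context)  # package/option/device checks
manager = cluster_fs.ClusterFSManager(context)
manager.init()  # dlm + Filesystem (plus lvmlockd/LVM-activate when -C is given)
```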
From 03717a023425b6e83d87eb4b53e41fc3e4ecd0e8 Mon Sep 17 00:00:00 2001 From: xin liang Date: Fri, 13 Dec 2024 14:40:12 +0800 Subject: [PATCH 3/5] Dev: behave: Add functional test for GFS2 --- data-manifest | 1 + test/features/bootstrap_options.feature | 2 +- test/features/gfs2.feature | 77 +++++++++++++++++++++++++ test/features/steps/const.py | 26 +++++++-- test/run-functional-tests | 2 +- 5 files changed, 101 insertions(+), 7 deletions(-) create mode 100644 test/features/gfs2.feature diff --git a/data-manifest b/data-manifest index 902ffb0aa0..65f2924945 100644 --- a/data-manifest +++ b/data-manifest @@ -79,6 +79,7 @@ test/features/crm_report_bugs.feature test/features/crm_report_normal.feature test/features/environment.py test/features/geo_setup.feature +test/features/gfs2.feature test/features/healthcheck.feature test/features/ocfs2.feature test/features/qdevice_options.feature diff --git a/test/features/bootstrap_options.feature b/test/features/bootstrap_options.feature index 16b3660eaf..4d212c87ab 100644 --- a/test/features/bootstrap_options.feature +++ b/test/features/bootstrap_options.feature @@ -45,7 +45,7 @@ Feature: crmsh bootstrap process - options @clean Scenario: Stage validation When Try "crm cluster init fdsf -y" on "hanode1" - Then Expected "Invalid stage: fdsf(available stages: ssh, csync2, corosync, sbd, cluster, ocfs2, admin, qdevice)" in stderr + Then Expected "Invalid stage: fdsf(available stages: ssh, csync2, corosync, sbd, cluster, ocfs2, gfs2, admin, qdevice)" in stderr When Try "crm cluster join fdsf -y" on "hanode1" Then Expected "Invalid stage: fdsf(available stages: ssh, csync2, ssh_merge, cluster)" in stderr When Try "crm cluster join ssh -y" on "hanode1" diff --git a/test/features/gfs2.feature b/test/features/gfs2.feature new file mode 100644 index 0000000000..858ecb434d --- /dev/null +++ b/test/features/gfs2.feature @@ -0,0 +1,77 @@ +@gfs2 +Feature: GFS2 configuration/verify using bootstrap + +@clean +Scenario: Error cases + Given Has disk "/dev/sda1" on "hanode1" + When Run "crm cluster init -y" on "hanode1" + Then Cluster service is "started" on "hanode1" + When Try "crm cluster init gfs2 -g /dev/sda1 -y" + Then Expected "GFS2 requires stonith device configured and running" in stderr + When Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1" + Then Service "sbd.service" is "started" on "hanode1" + When Try "crm cluster init gfs2 -g /dev/sda1 -y" + Then Expected "/dev/sda1 cannot be the same with SBD device" in stderr + When Run "crm cluster init gfs2 -g /dev/sda2 -y" on "hanode1" + Then Resource "gfs2-clusterfs" type "heartbeat:Filesystem" is "Started" + When Try "crm cluster init gfs2 -g /dev/sda3 -y -m /tmp/data" + Then Expected "Already configured GFS2 related resources" in stderr + +@clean +Scenario: Configure gfs2 along with init process + Given Has disk "/dev/sda1" on "hanode1" + And Has disk "/dev/sda2" on "hanode1" + When Run "crm cluster init -s /dev/sda1 -g /dev/sda2 -y" on "hanode1" + Then Cluster service is "started" on "hanode1" + And Service "sbd" is 
"started" on "hanode1" + And Resource "stonith-sbd" type "fence_sbd" is "Started" + And Resource "dlm" type "pacemaker:controld" is "Started" + And Resource "gfs2-clusterfs" type "heartbeat:Filesystem" is "Started" + +@clean +Scenario: Configure cluster lvm2 + gfs2 with init process + Given Has disk "/dev/sda1" on "hanode1" + And Has disk "/dev/sda2" on "hanode1" + And Has disk "/dev/sda3" on "hanode1" + When Run "crm cluster init -s /dev/sda1 -g /dev/sda2 -g /dev/sda3 -C -y" on "hanode1" + Then Cluster service is "started" on "hanode1" + And Service "sbd" is "started" on "hanode1" + And Resource "stonith-sbd" type "fence_sbd" is "Started" + And Resource "dlm" type "pacemaker:controld" is "Started" + And Resource "gfs2-lvmlockd" type "heartbeat:lvmlockd" is "Started" + And Resource "gfs2-lvmactivate" type "heartbeat:LVM-activate" is "Started" + And Resource "gfs2-clusterfs" type "heartbeat:Filesystem" is "Started" + +@clean +Scenario: Add gfs2 alone on a running cluster + Given Has disk "/dev/sda1" on "hanode1" + And Has disk "/dev/sda2" on "hanode1" + And Has disk "/dev/sda1" on "hanode2" + And Has disk "/dev/sda2" on "hanode2" + When Run "crm cluster init -s /dev/sda1 -y" on "hanode1" + And Run "crm cluster join -c hanode1 -y" on "hanode2" + Then Online nodes are "hanode1 hanode2" + And Service "sbd" is "started" on "hanode1" + And Service "sbd" is "started" on "hanode2" + And Resource "stonith-sbd" type "fence_sbd" is "Started" + When Run "crm cluster init gfs2 -g /dev/sda2 -y" on "hanode1" + Then Resource "dlm" type "pacemaker:controld" is "Started" + And Resource "gfs2-clusterfs" type "heartbeat:Filesystem" is "Started" + +@clean +Scenario: Add cluster lvm2 + gfs2 on a running cluster + Given Has disk "/dev/sda1" on "hanode1" + And Has disk "/dev/sda2" on "hanode1" + And Has disk "/dev/sda1" on "hanode2" + And Has disk "/dev/sda2" on "hanode2" + When Run "crm cluster init -s /dev/sda1 -y" on "hanode1" + And Run "crm cluster join -c hanode1 -y" on "hanode2" + Then Online nodes are "hanode1 hanode2" + And Service "sbd" is "started" on "hanode1" + And Service "sbd" is "started" on "hanode2" + And Resource "stonith-sbd" type "fence_sbd" is "Started" + When Run "crm cluster init gfs2 -g /dev/sda2 -C -y" on "hanode1" + Then Resource "dlm" type "pacemaker:controld" is "Started" + And Resource "gfs2-lvmlockd" type "heartbeat:lvmlockd" is "Started" + And Resource "gfs2-lvmactivate" type "heartbeat:LVM-activate" is "Started" + And Resource "gfs2-clusterfs" type "heartbeat:Filesystem" is "Started" diff --git a/test/features/steps/const.py b/test/features/steps/const.py index a8845bdf63..9ae053d57f 100644 --- a/test/features/steps/const.py +++ b/test/features/steps/const.py @@ -142,12 +142,18 @@ multiple raw disks, use ";" as separator or -o multiple times for multi path (must specify -C option) NOTE: this is a Technical Preview - -C, --cluster-lvm2 Use Cluster LVM2 (only valid together with -o option) + -g DEVICE, --gfs2-device DEVICE + Block device to use for GFS2; When using Cluster LVM2 + to manage the shared storage, user can specify one or + multiple raw disks, use ";" as separator or -g + multiple times for multi path (must specify -C option) NOTE: this is a Technical Preview + -C, --cluster-lvm2 Use Cluster LVM2 (only valid together with -o or -g + option) NOTE: this is a Technical Preview -m MOUNT, --mount-point MOUNT - Mount point for OCFS2 device (default is - /srv/clusterfs, only valid together with -o option) - NOTE: this is a Technical Preview + Mount point for OCFS2 or GFS2 device 
(default is + /srv/clusterfs, only valid together with -o or -g + option) NOTE: this is a Technical Preview Stage can be one of: ssh Create SSH keys for passwordless SSH between cluster nodes @@ -158,6 +164,7 @@ ocfs2 Configure OCFS2 (requires -o <ocfs2 device>) NOTE: this is a Technical Preview vgfs Create volume group and filesystem (ocfs2 template only, requires -o <ocfs2 device>) NOTE: this stage is an alias of ocfs2 stage + gfs2 Configure GFS2 (requires -g <gfs2 device>) NOTE: this is a Technical Preview admin Create administration virtual IP (optional) qdevice Configure qdevice and qnetd @@ -188,9 +195,15 @@ # Setup the cluster on the current node, with SBD+OCFS2 crm cluster init -s <share disk> -o <ocfs2 device> -y + # Setup the cluster on the current node, with SBD+GFS2 + crm cluster init -s <share disk> -g <gfs2 device> -y + # Setup the cluster on the current node, with SBD+OCFS2+Cluster LVM crm cluster init -s <share disk> -o <ocfs2 device1> -o <ocfs2 device2> -C -y + # Setup the cluster on the current node, with SBD+GFS2+Cluster LVM + crm cluster init -s <share disk> -g <gfs2 device1> -g <gfs2 device2> -C -y + # Add SBD on a running cluster crm cluster init sbd -s <share disk> -y @@ -204,7 +217,10 @@ crm cluster init qdevice --qnetd-hostname <qnetd addr> -y # Add OCFS2+Cluster LVM on a running cluster - crm cluster init ocfs2 -o <ocfs2 device1> -o <ocfs2 device2> -C -y''' + crm cluster init ocfs2 -o <ocfs2 device1> -o <ocfs2 device2> -C -y + + # Add GFS2+Cluster LVM on a running cluster + crm cluster init gfs2 -g <gfs2 device1> -g <gfs2 device2> -C -y''' CRM_CLUSTER_JOIN_H_OUTPUT = '''Join existing cluster diff --git a/test/run-functional-tests b/test/run-functional-tests index deacb78701..f12b8dc68f 100755 --- a/test/run-functional-tests +++ b/test/run-functional-tests @@ -14,7 +14,7 @@ HA_NETWORK_ARRAY[1]=$HA_NETWORK_SECOND HA_NETWORK_V6_ARRAY[0]="2001:db8:10::/64" HA_NETWORK_V6_ARRAY[1]="2001:db8:20::/64" BEHAVE_CASE_DIR="$(dirname $0)/features/" -BEHAVE_CASE_EXCLUDE="sbd|ocfs2" +BEHAVE_CASE_EXCLUDE="sbd|ocfs2|gfs2" read -r -d '' SSHD_CONFIG_AZURE << EOM PermitRootLogin no From 236f50a814d7830351edb169ca24d041111d8d73 Mon Sep 17 00:00:00 2001 From: xin liang Date: Wed, 18 Dec 2024 11:28:39 +0800 Subject: [PATCH 4/5] Dev: report: Support crm report to collect GFS2 information --- crmsh/report/collect.py | 66 +++++++++++++++++++++++++-------------- crmsh/report/constants.py | 1 + 2 files changed, 43 insertions(+), 24 deletions(-) diff --git a/crmsh/report/collect.py b/crmsh/report/collect.py index ed4bf17224..54e719c747 100644 --- a/crmsh/report/collect.py +++ b/crmsh/report/collect.py @@ -118,15 +118,15 @@ def dump_D_process() -> str: return out_string -def lsof_ocfs2_device() -> str: +def lsof_cluster_fs_device(fs_type: str) -> str: """ - List open files for OCFS2 device + List open files for OCFS2/GFS2 device """ out_string = "" sh_utils_inst = ShellUtils() _, out, _ = sh_utils_inst.get_stdout_stderr("mount") - dev_list = re.findall("^(.*) on .* type ocfs2 ", out, re.MULTILINE) + dev_list = re.findall(f"^(.*) on .* type {fs_type.lower()} ", out, re.M) for dev in dev_list: cmd = f"lsof {dev}" out_string += "\n\n#=====[ Command ] ==========================#\n"
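To show what the findall above extracts (not part of the patch), a minimal run against one line of mount output:

```python
# Illustrative only: the device column is captured for the requested fs type.
import re

mount_out = "/dev/sda6 on /srv/clusterfs type gfs2 (rw,relatime)"
print(re.findall(r"^(.*) on .* type gfs2 ", mount_out, re.M))  # ['/dev/sda6']
```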
@@ -138,9 +138,9 @@ def lsof_ocfs2_device() -> str: return out_string -def ocfs2_commands_output() -> str: +def cluster_fs_commands_output(fs_type: str) -> str: """ - Run ocfs2 related commands, return outputs + Run OCFS2/GFS2 related commands, return outputs """ out_string = "" @@ -148,11 +148,16 @@ "dmesg", "ps -efL", "lsblk -o 'NAME,KNAME,MAJ:MIN,FSTYPE,LABEL,RO,RM,MODEL,SIZE,OWNER,GROUP,MODE,ALIGNMENT,MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,MOUNTPOINT'", - "mounted.ocfs2 -f", "findmnt", - "mount", - "cat /sys/fs/ocfs2/cluster_stack" + "mount" ] + + if fs_type.lower() == "ocfs2": + cmds.extend([ + "mounted.ocfs2 -f", + "cat /sys/fs/ocfs2/cluster_stack" + ]) + for cmd in cmds: cmd_name = cmd.split()[0] if not shutil.which(cmd_name): @@ -166,25 +171,38 @@ -def collect_ocfs2_info(context: core.Context) -> None: +def collect_cluster_fs_info(context: core.Context) -> None: """ - Collects OCFS2 information + Collects OCFS2 and GFS2 information """ - out_string = "" - rc, out, err = ShellUtils().get_stdout_stderr("mounted.ocfs2 -d") - if rc != 0: - out_string += f"Failed to run \"mounted.ocfs2 -d\": {err}" - # No ocfs2 device, just header line printed - elif len(out.split('\n')) == 1: - out_string += "No ocfs2 partitions found" - else: - out_string += dump_D_process() - out_string += lsof_ocfs2_device() - out_string += ocfs2_commands_output() + def collect_info(cmd: str, fs_type: str, output_file: str) -> None: + out_string = "" + no_partition_msg = f"No {fs_type} partitions found" + + rc, out, err = ShellUtils().get_stdout_stderr(cmd) + if rc != 0: + if fs_type == "OCFS2": + error_msg = f"Failed to run \"{cmd}\": {err}" + out_string += error_msg + logger.error(error_msg) + elif fs_type == "GFS2": + out_string += no_partition_msg + elif fs_type == "OCFS2" and len(out.split('\n')) == 1: + out_string += no_partition_msg + else: + out_string += dump_D_process() + out_string += lsof_cluster_fs_device(fs_type) + out_string += cluster_fs_commands_output(fs_type) + + target_f = os.path.join(context.work_dir, output_file) + logger.debug("Dump %s information into %s", fs_type, utils.real_path(target_f)) + crmutils.str2file(out_string, target_f) + + # Collect OCFS2 information + collect_info("mounted.ocfs2 -d", "OCFS2", constants.OCFS2_F) - ocfs2_f = os.path.join(context.work_dir, constants.OCFS2_F) - logger.debug(f"Dump OCFS2 information into {utils.real_path(ocfs2_f)}") - crmutils.str2file(out_string, ocfs2_f) + # Collect GFS2 information + collect_info('mount|grep "type gfs2"', "GFS2", constants.GFS2_F) def collect_ratraces(context: core.Context) -> None: diff --git a/crmsh/report/constants.py b/crmsh/report/constants.py index ea3bb35e4e..3f20cd186b 100644 --- a/crmsh/report/constants.py +++ b/crmsh/report/constants.py @@ -57,6 +57,7 @@ SYSSTATS_F = "sysstats.txt" TIME_F = "time.txt" OCFS2_F = "ocfs2.txt" +GFS2_F = "gfs2.txt" SBD_F = "sbd.txt" QDEVICE_F = "quorum_qdevice_qnetd.txt" OSRELEASE = "/etc/os-release" From b84bdf067799c28f48b681dd4a8b806ed82c3186 Mon Sep 17 00:00:00 2001 From: xin liang Date: Tue, 17 Dec 2024 10:54:42 +0800 Subject: [PATCH 5/5] Dev: unittests: Add unit test for previous commit --- data-manifest | 1 + test/unittests/test_cluster_fs.py | 300 +++++++++++++++++ test/unittests/test_ocfs2.py | 465 -------------------------- test/unittests/test_report_collect.py | 152 +++++---- 4 files changed, 385 insertions(+), 533 deletions(-) create mode 100644 test/unittests/test_cluster_fs.py delete mode 100644 test/unittests/test_ocfs2.py diff --git a/data-manifest b/data-manifest index 65f2924945..5ba31ad115 100644 --- a/data-manifest +++ b/data-manifest @@ -189,6 +189,7 @@ test/unittests/test_bootstrap.py test/unittests/test_bugs.py test/unittests/test_cib.py test/unittests/test_cliformat.py +test/unittests/test_cluster_fs.py test/unittests/test.conf test/unittests/test_corosync.py test/unittests/test_crashtest_check.py diff --git a/test/unittests/test_cluster_fs.py b/test/unittests/test_cluster_fs.py new file mode 100644 index 0000000000..c3f4343355 --- /dev/null +++ b/test/unittests/test_cluster_fs.py @@ -0,0 
+1,300 @@ +import unittest +from unittest import mock + +from crmsh import cluster_fs, utils, ra + + +class TestClusterFSManager(unittest.TestCase): + + @mock.patch("crmsh.cluster_fs.ClusterFSManager._verify_options") + def setUp(self, mock_verify_options): + ocfs2_context_one_device = mock.Mock(ocfs2_devices=["/dev/sda1"], gfs2_devices=[], use_cluster_lvm2=False, stage=None) + gfs2_context_one_device_clvm2 = mock.Mock(ocfs2_devices=[], gfs2_devices=["/dev/sda1"], use_cluster_lvm2=True) + self.ocfs2_instance_one_device = cluster_fs.ClusterFSManager(ocfs2_context_one_device) + self.gfs2_instance_one_device_clvm2 = cluster_fs.ClusterFSManager(gfs2_context_one_device_clvm2) + + ocfs2_gfs2_both_context = mock.Mock(ocfs2_devices=["/dev/sda1"], gfs2_devices=["/dev/sda2"]) + self.instance_both = cluster_fs.ClusterFSManager(ocfs2_gfs2_both_context) + + ocfs2_stage_without_device_context = mock.Mock(ocfs2_devices=[], gfs2_devices=[], stage="ocfs2") + self.instance_ocfs2_stage_without_device = cluster_fs.ClusterFSManager(ocfs2_stage_without_device_context) + + gfs2_stage_without_device_context = mock.Mock(ocfs2_devices=[], gfs2_devices=[], stage="gfs2") + self.instance_gfs2_stage_without_device = cluster_fs.ClusterFSManager(gfs2_stage_without_device_context) + + ocfs2_stage_with_device_context = mock.Mock(ocfs2_devices=["/dev/sda1"], gfs2_devices=[], stage="ocfs2", use_cluster_lvm2=False) + self.instance_ocfs2_stage_with_device = cluster_fs.ClusterFSManager(ocfs2_stage_with_device_context) + + ocfs2_stage_with_device_clvm2_context = mock.Mock(ocfs2_devices=["/dev/sda1"], gfs2_devices=[], stage="ocfs2", use_cluster_lvm2=True) + self.instance_ocfs2_stage_with_device_clvm2 = cluster_fs.ClusterFSManager(ocfs2_stage_with_device_clvm2_context) + + clvm2_without_device_context = mock.Mock(ocfs2_devices=[], gfs2_devices=[], use_cluster_lvm2=True) + self.instance_clvm2_without_device = cluster_fs.ClusterFSManager(clvm2_without_device_context) + + multi_ocfs2_devices_without_clvm2_context = mock.Mock(ocfs2_devices=["/dev/sda1", "/dev/sda2"], gfs2_devices=[], use_cluster_lvm2=False) + self.multi_ocfs2_devices_without_clvm2 = cluster_fs.ClusterFSManager(multi_ocfs2_devices_without_clvm2_context) + + multi_gfs2_devices_without_clvm2_context = mock.Mock(ocfs2_devices=[], gfs2_devices=["/dev/sda1", "/dev/sda2"], use_cluster_lvm2=False) + self.multi_gfs2_devices_without_clvm2 = cluster_fs.ClusterFSManager(multi_gfs2_devices_without_clvm2_context) + + gfs2_context_one_device_with_mount_point = mock.Mock(ocfs2_devices=[], gfs2_devices=["/dev/sda1"], use_cluster_lvm2=False, mount_point="/mnt/gfs2") + self.gfs2_instance_one_device_with_mount_point = cluster_fs.ClusterFSManager(gfs2_context_one_device_with_mount_point) + + @mock.patch("crmsh.utils.package_is_installed") + @mock.patch("crmsh.utils.list_cluster_nodes") + def test_verify_packages_local(self, mock_list_cluster_nodes, mock_package_is_installed): + mock_package_is_installed.return_value = True + self.ocfs2_instance_one_device._verify_packages() + mock_list_cluster_nodes.assert_not_called() + mock_package_is_installed.assert_called_once_with("ocfs2-tools", None) + + @mock.patch("crmsh.utils.package_is_installed") + @mock.patch("crmsh.utils.list_cluster_nodes") + def test_verify_packages_remote(self, mock_list_cluster_nodes, mock_package_is_installed): + mock_list_cluster_nodes.return_value = ["node1", "node2"] + mock_package_is_installed.return_value = False + with self.assertRaises(cluster_fs.Error) as context: + 
self.instance_ocfs2_stage_with_device_clvm2._verify_packages() + self.assertIn("Missing required package for configuring OCFS2 on node1: ocfs2-tools", str(context.exception)) + + def test_verify_options(self): + with self.assertRaises(cluster_fs.Error) as context: + self.instance_both._verify_options() + self.assertIn("Can't use -g and -o options together", str(context.exception)) + + with self.assertRaises(cluster_fs.Error) as context: + self.instance_ocfs2_stage_without_device._verify_options() + self.assertIn("ocfs2 stage require -o option", str(context.exception)) + + with self.assertRaises(cluster_fs.Error) as context: + self.instance_gfs2_stage_without_device._verify_options() + self.assertIn("gfs2 stage require -g option", str(context.exception)) + + with self.assertRaises(cluster_fs.Error) as context: + self.instance_clvm2_without_device._verify_options() + self.assertIn("-C option only valid together with -o or -g option", str(context.exception)) + with self.assertRaises(cluster_fs.Error) as context: + self.multi_ocfs2_devices_without_clvm2._verify_options() + self.assertIn("Without Cluster LVM2 (-C option), -o option only support one device", str(context.exception)) + + with self.assertRaises(cluster_fs.Error) as context: + self.multi_gfs2_devices_without_clvm2._verify_options() + self.assertIn("Without Cluster LVM2 (-C option), -g option only support one device", str(context.exception)) + + @mock.patch("crmsh.utils.has_mount_point_used") + def test_verify_options_mount_point(self, mock_has_mount_point_used): + mock_has_mount_point_used.return_value = True + with self.assertRaises(cluster_fs.Error) as context: + self.gfs2_instance_one_device_with_mount_point._verify_options() + self.assertIn("Mount point /mnt/gfs2 already mounted", str(context.exception)) + + @mock.patch("crmsh.utils.is_block_device") + def test_verify_devices_not_block_device(self, mock_is_block_device): + mock_is_block_device.return_value = False + with self.assertRaises(cluster_fs.Error) as context: + self.ocfs2_instance_one_device._verify_devices() + self.assertIn("/dev/sda1 doesn't look like a block device", str(context.exception)) + + @mock.patch("crmsh.utils.is_dev_used_for_lvm") + @mock.patch("crmsh.utils.is_block_device") + def test_verify_devices_clvm2_with_lv(self, mock_is_block_device, mock_is_dev_used_for_lvm): + mock_is_block_device.return_value = True + mock_is_dev_used_for_lvm.return_value = True + with self.assertRaises(cluster_fs.Error) as context: + self.gfs2_instance_one_device_clvm2._verify_devices() + self.assertIn("/dev/sda1 is a Logical Volume, cannot be used with the -C option", str(context.exception)) + + @mock.patch("crmsh.utils.has_disk_mounted") + @mock.patch("crmsh.utils.is_dev_used_for_lvm") + @mock.patch("crmsh.utils.is_block_device") + def test_verify_devices_already_mounted(self, mock_is_block_device, mock_is_dev_used_for_lvm, mock_has_disk_mounted): + mock_is_block_device.return_value = True + mock_is_dev_used_for_lvm.return_value = False + mock_has_disk_mounted.return_value = True + with self.assertRaises(cluster_fs.Error) as context: + self.ocfs2_instance_one_device._verify_devices() + self.assertIn("/dev/sda1 is already mounted", str(context.exception)) + + @mock.patch("crmsh.sh.cluster_shell") + def test_check_if_already_configured_return(self, mock_cluster_shell): + self.ocfs2_instance_one_device._check_if_already_configured() + mock_cluster_shell.assert_not_called() + + @mock.patch("logging.Logger.warning") + @mock.patch("crmsh.sh.cluster_shell") + def 
test_check_if_already_configured(self, mock_cluster_shell, mock_warning): + mock_cluster_shell_inst = mock.Mock() + mock_cluster_shell.return_value = mock_cluster_shell_inst + mock_cluster_shell_inst.get_stdout_or_raise_error.return_value = """ + fstype=ocfs2 + data + """ + with self.assertRaises(utils.TerminateSubCommand): + self.instance_ocfs2_stage_with_device._check_if_already_configured() + mock_warning.assert_called_once_with("Already configured %s related resources", "OCFS2") + + def test_pre_verify(self): + self.ocfs2_instance_one_device._verify_packages = mock.Mock() + self.ocfs2_instance_one_device._check_if_already_configured = mock.Mock() + self.ocfs2_instance_one_device._verify_devices = mock.Mock() + self.ocfs2_instance_one_device._pre_verify() + self.ocfs2_instance_one_device._verify_packages.assert_called_once() + self.ocfs2_instance_one_device._check_if_already_configured.assert_called_once() + self.ocfs2_instance_one_device._verify_devices.assert_called_once() + + @mock.patch("crmsh.bootstrap.confirm") + @mock.patch("crmsh.utils.has_dev_partitioned") + def test_confirm_to_overwrite_device_no_overwrite(self, mock_has_dev_partitioned, mock_confirm): + mock_has_dev_partitioned.return_value = True + mock_confirm.return_value = False + with self.assertRaises(utils.TerminateSubCommand): + self.ocfs2_instance_one_device._confirm_to_overwrite_device() + mock_confirm.assert_called_once_with("Found a partition table in /dev/sda1 - overwrite?") + + @mock.patch("crmsh.sh.cluster_shell") + @mock.patch("crmsh.bootstrap.confirm") + @mock.patch("crmsh.utils.get_dev_fs_type") + @mock.patch("crmsh.utils.has_dev_partitioned") + def test_confirm_to_overwrite_device(self, mock_has_dev_partitioned, mock_get_dev_fs_type, mock_confirm, mock_cluster_shell): + mock_has_dev_partitioned.return_value = False + mock_get_dev_fs_type.return_value = "ext4" + mock_confirm.return_value = True + mock_cluster_shell_inst = mock.Mock() + mock_cluster_shell.return_value = mock_cluster_shell_inst + mock_cluster_shell_inst.get_stdout_or_raise_error.return_value = None + self.ocfs2_instance_one_device._confirm_to_overwrite_device() + mock_confirm.assert_called_once_with("/dev/sda1 contains a ext4 file system - overwrite?") + mock_cluster_shell_inst.get_stdout_or_raise_error.assert_called_once_with("wipefs -a /dev/sda1") + + @mock.patch("crmsh.utils.has_stonith_running") + def test_init_verify_no_stonith(self, mock_has_stonith_running): + mock_has_stonith_running.return_value = False + with self.assertRaises(cluster_fs.Error) as context: + self.instance_ocfs2_stage_with_device.init_verify() + self.assertIn("OCFS2 requires stonith device configured and running", str(context.exception)) + + def test_gen_ra_scripts_unsupport_type(self): + with self.assertRaises(cluster_fs.Error) as context: + self.ocfs2_instance_one_device._gen_ra_scripts("unsupport", {}) + self.assertIn("Unsupported RA type: unsupport", str(context.exception)) + + @mock.patch("crmsh.utils.gen_unused_id") + def test_gen_ra_scripts(self, mock_gen_unused_id): + mock_gen_unused_id.return_value = "dlm" + _id, scripts = self.ocfs2_instance_one_device._gen_ra_scripts("DLM", {"id": "dlm"}) + self.assertEqual(_id, "dlm") + self.assertEqual(scripts, ra.DLM_RA_SCRIPTS.format(id="dlm")) + + @mock.patch("crmsh.sh.cluster_shell") + @mock.patch("crmsh.log.LoggerUtils.status_long") + @mock.patch("logging.Logger.debug") + @mock.patch("crmsh.corosync.get_value") + def test_mkfs_ocfs2(self, mock_get_value, mock_debug, mock_status_long, mock_cluster_shell): + 
mock_get_value.return_value = "hacluster" + mock_status_long.return_value.__enter__ = mock.Mock() + mock_status_long.return_value.__exit__ = mock.Mock() + mock_cluster_shell_inst = mock.Mock() + mock_cluster_shell.return_value = mock_cluster_shell_inst + mock_cluster_shell_inst.get_stdout_or_raise_error.return_value = "" + self.ocfs2_instance_one_device.target_device = "/dev/sda1" + self.ocfs2_instance_one_device._mkfs() + mock_debug.assert_called_once_with("mkfs command: %s", "mkfs.ocfs2 --cluster-stack pcmk --cluster-name hacluster -N 8 -x /dev/sda1") + mock_status_long.assert_called_once_with("Creating ocfs2 filesystem on /dev/sda1") + + @mock.patch("crmsh.utils.randomword") + @mock.patch("crmsh.sh.cluster_shell") + @mock.patch("crmsh.log.LoggerUtils.status_long") + @mock.patch("logging.Logger.debug") + @mock.patch("crmsh.corosync.get_value") + def test_mkfs_gfs2(self, mock_get_value, mock_debug, mock_status_long, mock_cluster_shell, mock_randomword): + mock_randomword.return_value = "exezoy" + mock_get_value.return_value = "hacluster" + mock_status_long.return_value.__enter__ = mock.Mock() + mock_status_long.return_value.__exit__ = mock.Mock() + mock_cluster_shell_inst = mock.Mock() + mock_cluster_shell.return_value = mock_cluster_shell_inst + mock_cluster_shell_inst.get_stdout_or_raise_error.return_value = "" + self.gfs2_instance_one_device_clvm2.target_device = "/dev/sda1" + self.gfs2_instance_one_device_clvm2._mkfs() + mock_debug.assert_called_once_with("mkfs command: %s", "mkfs.gfs2 -t hacluster:FS_exezoy -p lock_dlm -j 8 /dev/sda1 -O") + mock_status_long.assert_called_once_with("Creating gfs2 filesystem on /dev/sda1") + + @mock.patch("crmsh.utils.set_property") + @mock.patch("crmsh.utils.get_property") + @mock.patch("crmsh.utils.all_exist_id") + @mock.patch("logging.Logger.info") + def test_init(self, mock_info, mock_all_exist_id, mock_get_property, mock_set_property): + mock_all_exist_id.return_value = ["ocfs2"] + mock_get_property.return_value = "good" + self.ocfs2_instance_one_device.init_verify = mock.Mock() + self.ocfs2_instance_one_device._configure_resource_stack = mock.Mock() + self.ocfs2_instance_one_device.init() + mock_info.assert_called_once_with("Configuring %s", "OCFS2") + mock_set_property.assert_called_once_with("no-quorum-policy", "freeze") + + @mock.patch("logging.Logger.info") + def test_configure_resource_stack_lvm2(self, mock_info): + self.gfs2_instance_one_device_clvm2._config_dlm = mock.Mock() + self.gfs2_instance_one_device_clvm2._config_lvmlockd = mock.Mock() + self.gfs2_instance_one_device_clvm2._create_lv = mock.Mock(return_value="/dev/vg/lv") + self.gfs2_instance_one_device_clvm2._vg_change = mock.Mock() + self.gfs2_instance_one_device_clvm2._vg_change.return_value.__enter__ = mock.Mock() + self.gfs2_instance_one_device_clvm2._vg_change.return_value.__exit__ = mock.Mock() + self.gfs2_instance_one_device_clvm2._mkfs = mock.Mock() + self.gfs2_instance_one_device_clvm2._config_lvmactivate = mock.Mock() + self.gfs2_instance_one_device_clvm2._config_fs = mock.Mock() + self.gfs2_instance_one_device_clvm2.mount_point = "/mnt/gfs2" + self.gfs2_instance_one_device_clvm2._configure_resource_stack() + mock_info.assert_called_once_with('%s device %s mounted on %s', 'GFS2', '/dev/vg/lv', '/mnt/gfs2') + + @mock.patch("logging.Logger.info") + def test_configure_resource_stack(self, mock_info): + self.ocfs2_instance_one_device._config_dlm = mock.Mock() + self.ocfs2_instance_one_device._mkfs = mock.Mock() + self.ocfs2_instance_one_device._config_fs = mock.Mock() + 
self.ocfs2_instance_one_device.mount_point = "/mnt/ocfs2" + self.ocfs2_instance_one_device._configure_resource_stack() + mock_info.assert_called_once_with('%s device %s mounted on %s', 'OCFS2', '/dev/sda1', '/mnt/ocfs2') + + @mock.patch("crmsh.sh.cluster_shell") + def test_find_target_on_join_none(self, mock_cluster_shell): + mock_cluster_shell_inst = mock.Mock() + mock_cluster_shell.return_value = mock_cluster_shell_inst + mock_cluster_shell_inst.get_stdout_or_raise_error.return_value = "data" + self.assertIsNone(self.ocfs2_instance_one_device._find_target_on_join("node1")) + mock_cluster_shell_inst.get_stdout_or_raise_error.assert_called_once_with("crm configure show", "node1") + + @mock.patch("crmsh.sh.cluster_shell") + def test_find_target_on_join(self, mock_cluster_shell): + mock_cluster_shell_inst = mock.Mock() + mock_cluster_shell.return_value = mock_cluster_shell_inst + mock_cluster_shell_inst.get_stdout_or_raise_error.return_value = """ + primitive gfs2-clusterfs Filesystem \ + params directory="/srv/clusterfs" fstype=gfs2 device="/dev/sda6" \ + op monitor interval=20 timeout=40 \ + """ + expected_dict = {"cluster_fs_type": "gfs2", "device": "/dev/sda6"} + self.assertDictEqual(self.ocfs2_instance_one_device._find_target_on_join("node1"), expected_dict) + mock_cluster_shell_inst.get_stdout_or_raise_error.assert_called_once_with("crm configure show", "node1") + + @mock.patch("crmsh.log.LoggerUtils.status_long") + def test_join_return(self, mock_status_long): + self.ocfs2_instance_one_device._find_target_on_join = mock.Mock(return_value=None) + self.ocfs2_instance_one_device.join("node1") + mock_status_long.assert_not_called() + + @mock.patch("crmsh.utils.compare_uuid_with_peer_dev") + @mock.patch("crmsh.utils.is_dev_a_plain_raw_disk_or_partition") + @mock.patch("crmsh.xmlutil.CrmMonXmlParser") + @mock.patch("crmsh.log.LoggerUtils.status_long") + def test_join(self, mock_status_long, mock_crmmonxmlparser, mock_is_dev_a_plain_raw_disk_or_partition, mock_compare_uuid_with_peer_dev): + mock_crmmonxmlparser_inst = mock.Mock() + mock_crmmonxmlparser.return_value = mock_crmmonxmlparser_inst + mock_crmmonxmlparser_inst.is_resource_configured = mock.Mock(return_value=True) + self.ocfs2_instance_one_device._find_target_on_join = mock.Mock(return_value={"cluster_fs_type": "ocfs2", "device": "/dev/sda1"}) + self.ocfs2_instance_one_device._verify_packages = mock.Mock() + mock_status_long.return_value.__enter__ = mock.Mock() + mock_status_long.return_value.__exit__ = mock.Mock() + mock_is_dev_a_plain_raw_disk_or_partition.return_value = True + self.ocfs2_instance_one_device.join("node1") + mock_status_long.assert_called_once_with("Verify OCFS2 environment on /dev/sda1") diff --git a/test/unittests/test_ocfs2.py b/test/unittests/test_ocfs2.py deleted file mode 100644 index 603c68d6c5..0000000000 --- a/test/unittests/test_ocfs2.py +++ /dev/null @@ -1,465 +0,0 @@ -import logging -import unittest -try: - from unittest import mock -except ImportError: - import mock -from crmsh import ocfs2, utils, ra, constants - -logging.basicConfig(level=logging.INFO) - -class TestOCFS2Manager(unittest.TestCase): - """ - Unitary tests for crmsh.bootstrap.SBDManager - """ - - @classmethod - def setUpClass(cls): - """ - Global setUp. - """ - - def setUp(self): - """ - Test setUp. 
- """ - context1 = mock.Mock(ocfs2_devices=[]) - self.ocfs2_inst1 = ocfs2.OCFS2Manager(context1) - - context2 = mock.Mock(ocfs2_devices=[], - stage="ocfs2", - yes_to_all=True) - self.ocfs2_inst2 = ocfs2.OCFS2Manager(context2) - - context3 = mock.Mock(ocfs2_devices=["/dev/sdb2", "/dev/sdc2"], - use_cluster_lvm2=False) - self.ocfs2_inst3 = ocfs2.OCFS2Manager(context3) - - context4 = mock.Mock(ocfs2_devices=[], - use_cluster_lvm2=True) - self.ocfs2_inst4 = ocfs2.OCFS2Manager(context4) - - context5 = mock.Mock(ocfs2_devices=["/dev/sda2", "/dev/sda2"]) - self.ocfs2_inst5 = ocfs2.OCFS2Manager(context5) - - context6 = mock.Mock(ocfs2_devices=["/dev/sda2"], - mount_point="/data") - self.ocfs2_inst6 = ocfs2.OCFS2Manager(context6) - - context7 = mock.Mock(ocfs2_devices=["/dev/sdb2"], - use_cluster_lvm2=True) - self.ocfs2_inst7 = ocfs2.OCFS2Manager(context7) - - def tearDown(self): - """ - Test tearDown. - """ - - @classmethod - def tearDownClass(cls): - """ - Global tearDown. - """ - - @mock.patch('crmsh.utils.package_is_installed') - def test_verify_packages(self, mock_installed): - mock_installed.side_effect = [True, False] - with self.assertRaises(ValueError) as err: - self.ocfs2_inst1._verify_packages(use_cluster_lvm2=True) - self.assertEqual("Missing required package for configuring OCFS2: lvm2-lockd", str(err.exception)) - mock_installed.assert_has_calls([ - mock.call("ocfs2-tools"), - mock.call("lvm2-lockd") - ]) - - def test_verify_options_stage_miss_option(self): - with self.assertRaises(ValueError) as err: - self.ocfs2_inst2._verify_options() - self.assertEqual("ocfs2 stage require -o option", str(err.exception)) - - def test_verify_options_two_devices(self): - with self.assertRaises(ValueError) as err: - self.ocfs2_inst3._verify_options() - self.assertEqual("Without Cluster LVM2 (-C option), -o option only support one device", str(err.exception)) - - def test_verify_options_only_C(self): - with self.assertRaises(ValueError) as err: - self.ocfs2_inst4._verify_options() - self.assertEqual("-C option only valid together with -o option", str(err.exception)) - - @mock.patch('crmsh.utils.has_mount_point_used') - def test_verify_options_mount(self, mock_mount): - mock_mount.return_value = True - with self.assertRaises(ValueError) as err: - self.ocfs2_inst6._verify_options() - self.assertEqual("Mount point /data already mounted", str(err.exception)) - mock_mount.assert_called_once_with("/data") - - @mock.patch('crmsh.utils.is_block_device') - def test_verify_devices_not_block(self, mock_is_block): - mock_is_block.return_value = False - with self.assertRaises(ValueError) as err: - self.ocfs2_inst3._verify_devices() - self.assertEqual("/dev/sdb2 doesn't look like a block device", str(err.exception)) - mock_is_block.assert_called_once_with("/dev/sdb2") - - @mock.patch('crmsh.utils.is_dev_used_for_lvm') - @mock.patch('crmsh.utils.is_block_device') - def test_verify_devices_lvm(self, mock_is_block, mock_lvm): - mock_lvm.return_value = True - mock_is_block.return_value = True - with self.assertRaises(ValueError) as err: - self.ocfs2_inst7._verify_devices() - self.assertEqual("/dev/sdb2 is a Logical Volume, cannot be used with the -C option", str(err.exception)) - mock_is_block.assert_called_once_with("/dev/sdb2") - mock_lvm.assert_called_once_with("/dev/sdb2") - - @mock.patch('crmsh.utils.has_disk_mounted') - @mock.patch('crmsh.utils.is_dev_used_for_lvm') - @mock.patch('crmsh.utils.is_block_device') - def test_verify_devices_mounted(self, mock_is_block, mock_lvm, mock_mounted): - mock_lvm.return_value 
+        self.gfs2_instance_one_device_clvm2._config_dlm = mock.Mock()
+        self.gfs2_instance_one_device_clvm2._config_lvmlockd = mock.Mock()
+        self.gfs2_instance_one_device_clvm2._create_lv = mock.Mock(return_value="/dev/vg/lv")
+        self.gfs2_instance_one_device_clvm2._vg_change = mock.Mock()
+        self.gfs2_instance_one_device_clvm2._vg_change.return_value.__enter__ = mock.Mock()
+        self.gfs2_instance_one_device_clvm2._vg_change.return_value.__exit__ = mock.Mock()
+        self.gfs2_instance_one_device_clvm2._mkfs = mock.Mock()
+        self.gfs2_instance_one_device_clvm2._config_lvmactivate = mock.Mock()
+        self.gfs2_instance_one_device_clvm2._config_fs = mock.Mock()
+        self.gfs2_instance_one_device_clvm2.mount_point = "/mnt/gfs2"
+        self.gfs2_instance_one_device_clvm2._configure_resource_stack()
+        mock_info.assert_called_once_with('%s device %s mounted on %s', 'GFS2', '/dev/vg/lv', '/mnt/gfs2')
+
+    @mock.patch("logging.Logger.info")
+    def test_configure_resource_stack(self, mock_info):
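+        # On a plain device the stack shrinks to DLM -> mkfs -> Filesystem;
+        # no lvmlockd or LVM-activate resources are involved.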
+        self.ocfs2_instance_one_device._config_dlm = mock.Mock()
+        self.ocfs2_instance_one_device._mkfs = mock.Mock()
+        self.ocfs2_instance_one_device._config_fs = mock.Mock()
+        self.ocfs2_instance_one_device.mount_point = "/mnt/ocfs2"
+        self.ocfs2_instance_one_device._configure_resource_stack()
+        mock_info.assert_called_once_with('%s device %s mounted on %s', 'OCFS2', '/dev/sda1', '/mnt/ocfs2')
+
+    @mock.patch("crmsh.sh.cluster_shell")
+    def test_find_target_on_join_none(self, mock_cluster_shell):
+        mock_cluster_shell_inst = mock.Mock()
+        mock_cluster_shell.return_value = mock_cluster_shell_inst
+        mock_cluster_shell_inst.get_stdout_or_raise_error.return_value = "data"
+        self.assertIsNone(self.ocfs2_instance_one_device._find_target_on_join("node1"))
+        mock_cluster_shell_inst.get_stdout_or_raise_error.assert_called_once_with("crm configure show", "node1")
+
+    @mock.patch("crmsh.sh.cluster_shell")
+    def test_find_target_on_join(self, mock_cluster_shell):
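+        # The join side parses `crm configure show` from the peer to find a
+        # Filesystem primitive and extract the fstype and device in use.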
"group_script\n"), ("id2", "clone_script\n")] - res = self.ocfs2_inst3._gen_group_and_clone_scripts(["ra1", "ra2"]) - self.assertEqual(res, "group_script\nclone_script\n") - mock_gen.assert_has_calls([ - mock.call('GROUP', {'id': 'ocfs2-group', 'ra_string': 'ra1 ra2'}), - mock.call('CLONE', {'id': 'ocfs2-clone', 'group_id': 'id1'}) - ]) - - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts') - def test_gen_fs_scripts(self, mock_gen): - mock_gen.return_value = "scripts" - self.ocfs2_inst3.mount_point = "/data" - self.ocfs2_inst3.target_device = "/dev/sda1" - res = self.ocfs2_inst3._gen_fs_scripts() - self.assertEqual(res, "scripts") - mock_gen.assert_called_once_with("Filesystem", {'id': 'ocfs2-clusterfs', 'mnt_point': '/data', 'fs_type': 'ocfs2', 'device': '/dev/sda1'}) - - @mock.patch('crmsh.bootstrap.wait_for_resource') - @mock.patch('crmsh.utils.append_res_to_group') - @mock.patch('crmsh.bootstrap.crm_configure_load') - def test_load_append_and_wait(self, mock_load, mock_append, mock_wait): - self.ocfs2_inst3.group_id = "g1" - self.ocfs2_inst3._load_append_and_wait("scripts", "res_id", "messages data") - mock_load.assert_called_once_with("update", "scripts") - mock_append.assert_called_once_with("g1", "res_id") - mock_wait.assert_called_once_with("messages data", "res_id") - - @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait') - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_group_and_clone_scripts') - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts') - def test_config_dlm(self, mock_gen_ra, mock_gen_group, mock_load_wait): - mock_gen_ra.return_value = ("dlm_id", "dlm_scripts\n") - mock_gen_group.return_value = "group_scripts\n" - self.ocfs2_inst3._config_dlm() - mock_gen_ra.assert_called_once_with("DLM", {"id": "ocfs2-dlm"}) - mock_gen_group.assert_called_once_with(["dlm_id"]) - mock_load_wait.assert_called_once_with("dlm_scripts\ngroup_scripts\n", "dlm_id", " Wait for DLM(dlm_id) start", need_append=False) - - @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait') - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts') - def test_config_lvmlockd(self, mock_gen_ra, mock_load_wait): - mock_gen_ra.return_value = ("ra_id", "ra_scripts\n") - self.ocfs2_inst3._config_lvmlockd() - mock_gen_ra.assert_called_once_with("LVMLockd", {"id": "ocfs2-lvmlockd"}) - mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMLockd(ra_id) start") - - @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait') - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts') - def test_config_lvmactivate(self, mock_gen_ra, mock_load_wait): - mock_gen_ra.return_value = ("ra_id", "ra_scripts\n") - self.ocfs2_inst3.vg_id = "vg1" - self.ocfs2_inst3._config_lvmactivate() - mock_gen_ra.assert_called_once_with("LVMActivate", {"id": "ocfs2-lvmactivate", "vgname": "vg1"}) - mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMActivate(ra_id) start") - - @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait') - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_fs_scripts') - @mock.patch('crmsh.utils.mkdirp') - def test_config_fs(self, mock_mkdir, mock_gen_fs, mock_load_wait): - mock_gen_fs.return_value = ("ra_id", "ra_scripts\n") - self.ocfs2_inst3.mount_point = "/data" - self.ocfs2_inst3._config_fs() - mock_mkdir.assert_called_once_with("/data") - mock_gen_fs.assert_called_once_with() - mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for Filesystem(ra_id) start") - - @mock.patch('crmsh.ocfs2.OCFS2Manager._config_fs') - 
+        mock_crmmonxmlparser_inst = mock.Mock()
+        mock_crmmonxmlparser.return_value = mock_crmmonxmlparser_inst
+        mock_crmmonxmlparser_inst.is_resource_configured = mock.Mock(return_value=True)
+        self.ocfs2_instance_one_device._find_target_on_join = mock.Mock(return_value={"cluster_fs_type": "ocfs2", "device": "/dev/sda1"})
+        self.ocfs2_instance_one_device._verify_packages = mock.Mock()
+        mock_status_long.return_value.__enter__ = mock.Mock()
+        mock_status_long.return_value.__exit__ = mock.Mock()
+        mock_is_dev_a_plain_raw_disk_or_partition.return_value = True
+        self.ocfs2_instance_one_device.join("node1")
+        mock_status_long.assert_called_once_with("Verify OCFS2 environment on /dev/sda1")
diff --git a/test/unittests/test_ocfs2.py b/test/unittests/test_ocfs2.py
deleted file mode 100644
index 603c68d6c5..0000000000
--- a/test/unittests/test_ocfs2.py
+++ /dev/null
@@ -1,465 +0,0 @@
-import logging
-import unittest
-try:
-    from unittest import mock
-except ImportError:
-    import mock
-from crmsh import ocfs2, utils, ra, constants
-
-logging.basicConfig(level=logging.INFO)
-
-class TestOCFS2Manager(unittest.TestCase):
-    """
-    Unitary tests for crmsh.bootstrap.SBDManager
-    """
-
-    @classmethod
-    def setUpClass(cls):
-        """
-        Global setUp.
-        """
-
-    def setUp(self):
-        """
-        Test setUp.
-        """
-        context1 = mock.Mock(ocfs2_devices=[])
-        self.ocfs2_inst1 = ocfs2.OCFS2Manager(context1)
-
-        context2 = mock.Mock(ocfs2_devices=[],
-                stage="ocfs2",
-                yes_to_all=True)
-        self.ocfs2_inst2 = ocfs2.OCFS2Manager(context2)
-
-        context3 = mock.Mock(ocfs2_devices=["/dev/sdb2", "/dev/sdc2"],
-                use_cluster_lvm2=False)
-        self.ocfs2_inst3 = ocfs2.OCFS2Manager(context3)
-
-        context4 = mock.Mock(ocfs2_devices=[],
-                use_cluster_lvm2=True)
-        self.ocfs2_inst4 = ocfs2.OCFS2Manager(context4)
-
-        context5 = mock.Mock(ocfs2_devices=["/dev/sda2", "/dev/sda2"])
-        self.ocfs2_inst5 = ocfs2.OCFS2Manager(context5)
-
-        context6 = mock.Mock(ocfs2_devices=["/dev/sda2"],
-                mount_point="/data")
-        self.ocfs2_inst6 = ocfs2.OCFS2Manager(context6)
-
-        context7 = mock.Mock(ocfs2_devices=["/dev/sdb2"],
-                use_cluster_lvm2=True)
-        self.ocfs2_inst7 = ocfs2.OCFS2Manager(context7)
-
-    def tearDown(self):
-        """
-        Test tearDown.
-        """
-
-    @classmethod
-    def tearDownClass(cls):
-        """
-        Global tearDown.
-        """
-
-    @mock.patch('crmsh.utils.package_is_installed')
-    def test_verify_packages(self, mock_installed):
-        mock_installed.side_effect = [True, False]
-        with self.assertRaises(ValueError) as err:
-            self.ocfs2_inst1._verify_packages(use_cluster_lvm2=True)
-        self.assertEqual("Missing required package for configuring OCFS2: lvm2-lockd", str(err.exception))
-        mock_installed.assert_has_calls([
-            mock.call("ocfs2-tools"),
-            mock.call("lvm2-lockd")
-            ])
-
-    def test_verify_options_stage_miss_option(self):
-        with self.assertRaises(ValueError) as err:
-            self.ocfs2_inst2._verify_options()
-        self.assertEqual("ocfs2 stage require -o option", str(err.exception))
-
-    def test_verify_options_two_devices(self):
-        with self.assertRaises(ValueError) as err:
-            self.ocfs2_inst3._verify_options()
-        self.assertEqual("Without Cluster LVM2 (-C option), -o option only support one device", str(err.exception))
-
-    def test_verify_options_only_C(self):
-        with self.assertRaises(ValueError) as err:
-            self.ocfs2_inst4._verify_options()
-        self.assertEqual("-C option only valid together with -o option", str(err.exception))
-
-    @mock.patch('crmsh.utils.has_mount_point_used')
-    def test_verify_options_mount(self, mock_mount):
-        mock_mount.return_value = True
-        with self.assertRaises(ValueError) as err:
-            self.ocfs2_inst6._verify_options()
-        self.assertEqual("Mount point /data already mounted", str(err.exception))
-        mock_mount.assert_called_once_with("/data")
-
-    @mock.patch('crmsh.utils.is_block_device')
-    def test_verify_devices_not_block(self, mock_is_block):
-        mock_is_block.return_value = False
-        with self.assertRaises(ValueError) as err:
-            self.ocfs2_inst3._verify_devices()
-        self.assertEqual("/dev/sdb2 doesn't look like a block device", str(err.exception))
-        mock_is_block.assert_called_once_with("/dev/sdb2")
-
-    @mock.patch('crmsh.utils.is_dev_used_for_lvm')
-    @mock.patch('crmsh.utils.is_block_device')
-    def test_verify_devices_lvm(self, mock_is_block, mock_lvm):
-        mock_lvm.return_value = True
-        mock_is_block.return_value = True
-        with self.assertRaises(ValueError) as err:
-            self.ocfs2_inst7._verify_devices()
-        self.assertEqual("/dev/sdb2 is a Logical Volume, cannot be used with the -C option", str(err.exception))
-        mock_is_block.assert_called_once_with("/dev/sdb2")
-        mock_lvm.assert_called_once_with("/dev/sdb2")
-
-    @mock.patch('crmsh.utils.has_disk_mounted')
-    @mock.patch('crmsh.utils.is_dev_used_for_lvm')
-    @mock.patch('crmsh.utils.is_block_device')
-    def test_verify_devices_mounted(self, mock_is_block, mock_lvm, mock_mounted):
-        mock_lvm.return_value = False
-        mock_is_block.return_value = True
-        mock_mounted.return_value = True
-        with self.assertRaises(ValueError) as err:
-            self.ocfs2_inst7._verify_devices()
-        self.assertEqual("/dev/sdb2 already mounted", str(err.exception))
-        mock_is_block.assert_called_once_with("/dev/sdb2")
-        mock_lvm.assert_called_once_with("/dev/sdb2")
-        mock_mounted.assert_called_once_with("/dev/sdb2")
-
-    def test_check_if_already_configured_return(self):
-        self.ocfs2_inst3._check_if_already_configured()
-
-    @mock.patch('logging.Logger.info')
-    @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
-    def test_check_if_already_configured(self, mock_run, mock_info):
-        mock_run.return_value = "data xxx fstype=ocfs2 sss"
-        with self.assertRaises(utils.TerminateSubCommand):
-            self.ocfs2_inst2._check_if_already_configured()
-        mock_run.assert_called_once_with("crm configure show")
-        mock_info.assert_called_once_with("Already configured OCFS2 related resources")
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_devices')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._check_if_already_configured')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_options')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_packages')
-    def test_static_verify(self, mock_verify_packages, mock_verify_options, mock_configured, mock_verify_devices):
-        self.ocfs2_inst3._static_verify()
-        mock_verify_packages.assert_called_once_with(False)
-        mock_verify_options.assert_called_once_with()
-        mock_configured.assert_called_once_with()
-        mock_verify_devices.assert_called_once_with()
-
-    def test_dynamic_raise_error(self):
-        with self.assertRaises(ValueError) as err:
-            self.ocfs2_inst2._dynamic_raise_error("error messages")
-        self.assertEqual("error messages", str(err.exception))
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_raise_error')
-    @mock.patch('crmsh.sbd.SBDManager.get_sbd_device_from_config')
-    @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled')
-    def test_check_sbd_and_ocfs2_dev(self, mock_enabled, mock_get_device, mock_error):
-        mock_enabled.return_value = True
-        mock_get_device.return_value = ["/dev/sdb2"]
-        self.ocfs2_inst3._check_sbd_and_ocfs2_dev()
-        mock_enabled.assert_called_once_with("sbd.service")
-        mock_get_device.assert_called_once_with()
-        mock_error.assert_called_once_with("/dev/sdb2 cannot be the same with SBD device")
-
-    @mock.patch('crmsh.bootstrap.confirm')
-    @mock.patch('crmsh.utils.get_dev_fs_type')
-    @mock.patch('crmsh.utils.has_dev_partitioned')
-    def test_confirm_to_overwrite_ocfs2_dev(self, mock_has_parted, mock_fstype, mock_confirm):
-        mock_has_parted.side_effect = [True, False]
-        mock_fstype.return_value = "ext4"
-        mock_confirm.side_effect = [True, False]
-        with self.assertRaises(utils.TerminateSubCommand) as err:
-            self.ocfs2_inst3._confirm_to_overwrite_ocfs2_dev()
-        mock_has_parted.assert_has_calls([
-            mock.call("/dev/sdb2"),
-            mock.call("/dev/sdc2")
-            ])
-        mock_fstype.assert_called_once_with("/dev/sdc2")
-        mock_confirm.assert_has_calls([
-            mock.call("Found a partition table in /dev/sdb2 - Proceed anyway?"),
-            mock.call("/dev/sdc2 contains a ext4 file system - Proceed anyway?")
-            ])
-
-    @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
-    @mock.patch('crmsh.bootstrap.confirm')
-    @mock.patch('crmsh.utils.get_dev_fs_type')
-    @mock.patch('crmsh.utils.has_dev_partitioned')
-    def test_confirm_to_overwrite_ocfs2_dev_confirmed(self, mock_has_parted, mock_fstype, mock_confirm, mock_run):
-        mock_has_parted.side_effect = [True, False]
-        mock_fstype.return_value = "ext4"
-        mock_confirm.side_effect = [True, True]
-        self.ocfs2_inst3._confirm_to_overwrite_ocfs2_dev()
-        mock_has_parted.assert_has_calls([
-            mock.call("/dev/sdb2"),
-            mock.call("/dev/sdc2")
-            ])
-        mock_fstype.assert_called_once_with("/dev/sdc2")
-        mock_confirm.assert_has_calls([
-            mock.call("Found a partition table in /dev/sdb2 - Proceed anyway?"),
-            mock.call("/dev/sdc2 contains a ext4 file system - Proceed anyway?")
-            ])
-        mock_run.assert_has_calls([
-            mock.call("wipefs -a /dev/sdb2"),
-            mock.call("wipefs -a /dev/sdc2")
-            ])
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_raise_error')
-    @mock.patch('crmsh.utils.has_stonith_running')
-    def test_dynamic_verify_error(self, mock_has_stonith, mock_error):
-        mock_has_stonith.return_value = False
-        mock_error.side_effect = SystemExit
-        with self.assertRaises(SystemExit):
-            self.ocfs2_inst3._dynamic_verify()
-        mock_has_stonith.assert_called_once_with()
-        mock_error.assert_called_once_with("OCFS2 requires stonith device configured and running")
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._confirm_to_overwrite_ocfs2_dev')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._check_sbd_and_ocfs2_dev')
-    @mock.patch('crmsh.utils.has_stonith_running')
-    def test_dynamic_verify(self, mock_has_stonith, mock_check_dev, mock_confirm):
-        mock_has_stonith.return_value = True
-        self.ocfs2_inst3._dynamic_verify()
-        mock_has_stonith.assert_called_once_with()
-        mock_check_dev.assert_called_once_with()
-        mock_confirm.assert_called_once_with()
-
-    @mock.patch('crmsh.utils.gen_unused_id')
-    def test_gen_ra_scripts(self, mock_gen_unused):
-        self.ocfs2_inst3.exist_ra_id_list = []
-        mock_gen_unused.return_value = "g1"
-        res = self.ocfs2_inst3._gen_ra_scripts("GROUP", {"id": "g1", "ra_string": "d vip"})
-        assert res == ("g1", "\ngroup g1 d vip")
-        mock_gen_unused.assert_called_once_with([], "g1")
-
-    @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
-    @mock.patch('crmsh.corosync.get_value')
-    @mock.patch('crmsh.log.LoggerUtils.status_long')
-    def test_mkfs(self, mock_long, mock_get_value, mock_run):
-        mock_get_value.return_value = "hacluster"
-        self.ocfs2_inst3._mkfs("/dev/sdb2")
-        mock_long.assert_called_once_with(" Creating OCFS2 filesystem for /dev/sdb2")
-        mock_get_value.assert_called_once_with("totem.cluster_name")
-        mock_run.assert_called_once_with("mkfs.ocfs2 --cluster-stack pcmk --cluster-name hacluster -N 8 -x /dev/sdb2")
-
-    @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
-    def test_vg_change(self, mock_run):
-        self.ocfs2_inst3.vg_id = "vg1"
-        with self.ocfs2_inst3._vg_change():
-            pass
-        mock_run.assert_has_calls([
-            mock.call("vgchange -ay vg1"),
-            mock.call("vgchange -an vg1")
-            ])
-
-    @mock.patch('crmsh.utils.get_pe_number')
-    @mock.patch('crmsh.utils.gen_unused_id')
-    @mock.patch('crmsh.utils.get_all_vg_name')
-    @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
-    @mock.patch('crmsh.log.LoggerUtils.status_long')
-    def test_create_lv(self, mock_long, mock_run, mock_all_vg, mock_unused, mock_pe_num):
-        mock_all_vg.return_value = []
-        mock_unused.return_value = "vg1"
-        mock_pe_num.return_value = 1234
-        res = self.ocfs2_inst3._create_lv()
-        self.assertEqual(res, "/dev/vg1/ocfs2-lv")
-        mock_run.assert_has_calls([
-            mock.call("pvcreate /dev/sdb2 /dev/sdc2 -y"),
-            mock.call("vgcreate --shared vg1 /dev/sdb2 /dev/sdc2 -y"),
-            mock.call("lvcreate -l 1234 vg1 -n ocfs2-lv -y")
-            ])
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
-    def test_gen_group_and_clone_scripts(self, mock_gen):
-        mock_gen.side_effect = [("id1", "group_script\n"), ("id2", "clone_script\n")]
-        res = self.ocfs2_inst3._gen_group_and_clone_scripts(["ra1", "ra2"])
-        self.assertEqual(res, "group_script\nclone_script\n")
-        mock_gen.assert_has_calls([
-            mock.call('GROUP', {'id': 'ocfs2-group', 'ra_string': 'ra1 ra2'}),
-            mock.call('CLONE', {'id': 'ocfs2-clone', 'group_id': 'id1'})
-            ])
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
-    def test_gen_fs_scripts(self, mock_gen):
-        mock_gen.return_value = "scripts"
-        self.ocfs2_inst3.mount_point = "/data"
-        self.ocfs2_inst3.target_device = "/dev/sda1"
-        res = self.ocfs2_inst3._gen_fs_scripts()
-        self.assertEqual(res, "scripts")
-        mock_gen.assert_called_once_with("Filesystem", {'id': 'ocfs2-clusterfs', 'mnt_point': '/data', 'fs_type': 'ocfs2', 'device': '/dev/sda1'})
-
-    @mock.patch('crmsh.bootstrap.wait_for_resource')
-    @mock.patch('crmsh.utils.append_res_to_group')
-    @mock.patch('crmsh.bootstrap.crm_configure_load')
-    def test_load_append_and_wait(self, mock_load, mock_append, mock_wait):
-        self.ocfs2_inst3.group_id = "g1"
-        self.ocfs2_inst3._load_append_and_wait("scripts", "res_id", "messages data")
-        mock_load.assert_called_once_with("update", "scripts")
-        mock_append.assert_called_once_with("g1", "res_id")
-        mock_wait.assert_called_once_with("messages data", "res_id")
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_group_and_clone_scripts')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
-    def test_config_dlm(self, mock_gen_ra, mock_gen_group, mock_load_wait):
-        mock_gen_ra.return_value = ("dlm_id", "dlm_scripts\n")
-        mock_gen_group.return_value = "group_scripts\n"
-        self.ocfs2_inst3._config_dlm()
-        mock_gen_ra.assert_called_once_with("DLM", {"id": "ocfs2-dlm"})
-        mock_gen_group.assert_called_once_with(["dlm_id"])
-        mock_load_wait.assert_called_once_with("dlm_scripts\ngroup_scripts\n", "dlm_id", " Wait for DLM(dlm_id) start", need_append=False)
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
-    def test_config_lvmlockd(self, mock_gen_ra, mock_load_wait):
-        mock_gen_ra.return_value = ("ra_id", "ra_scripts\n")
-        self.ocfs2_inst3._config_lvmlockd()
-        mock_gen_ra.assert_called_once_with("LVMLockd", {"id": "ocfs2-lvmlockd"})
-        mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMLockd(ra_id) start")
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts')
-    def test_config_lvmactivate(self, mock_gen_ra, mock_load_wait):
-        mock_gen_ra.return_value = ("ra_id", "ra_scripts\n")
-        self.ocfs2_inst3.vg_id = "vg1"
-        self.ocfs2_inst3._config_lvmactivate()
-        mock_gen_ra.assert_called_once_with("LVMActivate", {"id": "ocfs2-lvmactivate", "vgname": "vg1"})
-        mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMActivate(ra_id) start")
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_fs_scripts')
-    @mock.patch('crmsh.utils.mkdirp')
-    def test_config_fs(self, mock_mkdir, mock_gen_fs, mock_load_wait):
-        mock_gen_fs.return_value = ("ra_id", "ra_scripts\n")
-        self.ocfs2_inst3.mount_point = "/data"
-        self.ocfs2_inst3._config_fs()
-        mock_mkdir.assert_called_once_with("/data")
-        mock_gen_fs.assert_called_once_with()
-        mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for Filesystem(ra_id) start")
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._config_fs')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._config_lvmactivate')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._mkfs')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._vg_change')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._create_lv')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._config_lvmlockd')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._config_dlm')
-    def test_config_resource_stack_lvm2(self, mock_dlm, mock_lvmlockd, mock_lv, mock_vg, mock_mkfs, mock_lvmactivate, mock_fs):
-        mock_lv.return_value = "/dev/sda1"
-        self.ocfs2_inst3._config_resource_stack_lvm2()
-        mock_dlm.assert_called_once_with()
-        mock_lvmlockd.assert_called_once_with()
-        mock_lv.assert_called_once_with()
-        mock_mkfs.assert_called_once_with("/dev/sda1")
-        mock_lvmactivate.assert_called_once_with()
-        mock_fs.assert_called_once_with()
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._config_fs')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._mkfs')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._config_dlm')
-    def test_config_resource_stack_ocfs2_along(self, mock_dlm, mock_mkfs, mock_fs):
-        self.ocfs2_inst3._config_resource_stack_ocfs2_along()
-        mock_dlm.assert_called_once_with()
-        mock_mkfs.assert_called_once_with("/dev/sdb2")
-        mock_fs.assert_called_once_with()
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_lvm2')
-    @mock.patch('crmsh.utils.set_property')
-    @mock.patch('crmsh.utils.get_property')
-    @mock.patch('crmsh.utils.all_exist_id')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify')
-    @mock.patch('logging.Logger.info')
-    def test_init_ocfs2_lvm2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_lvm2):
-        mock_all_id.return_value = []
-        mock_get.return_value = None
-        self.ocfs2_inst7.mount_point = "/data"
-        self.ocfs2_inst7.target_device = "/dev/vg1/lv1"
-        self.ocfs2_inst7.init_ocfs2()
-        mock_status.assert_has_calls([
-            mock.call("Configuring OCFS2"),
-            mock.call(' \'no-quorum-policy\' is changed to "freeze"'),
-            mock.call(' OCFS2 device %s mounted on %s', '/dev/vg1/lv1', '/data')
-            ])
-        mock_dynamic_verify.assert_called_once_with()
-        mock_all_id.assert_called_once_with()
-        mock_lvm2.assert_called_once_with()
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_ocfs2_along')
-    @mock.patch('crmsh.utils.set_property')
-    @mock.patch('crmsh.utils.get_property')
-    @mock.patch('crmsh.utils.all_exist_id')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify')
-    @mock.patch('logging.Logger.info')
-    def test_init_ocfs2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_ocfs2):
-        mock_all_id.return_value = []
-        mock_get.return_value = None
-        self.ocfs2_inst3.mount_point = "/data"
-        self.ocfs2_inst3.target_device = "/dev/sda1"
-        self.ocfs2_inst3.init_ocfs2()
-        mock_status.assert_has_calls([
-            mock.call("Configuring OCFS2"),
-            mock.call(' \'no-quorum-policy\' is changed to "freeze"'),
-            mock.call(' OCFS2 device %s mounted on %s', '/dev/sda1', '/data')
-            ])
-        mock_dynamic_verify.assert_called_once_with()
-        mock_all_id.assert_called_once_with()
-        mock_ocfs2.assert_called_once_with()
-
-    @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
-    def test_find_target_on_join_none(self, mock_run):
-        mock_run.return_value = "data"
-        res = self.ocfs2_inst3._find_target_on_join("node1")
-        assert res is None
-        mock_run.assert_called_once_with("crm configure show", "node1")
-
-    @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
-    def test_find_target_on_join_exception(self, mock_run):
-        mock_run.return_value = """
-params directory="/srv/clusterfs" fstype=ocfs2
-        """
-        with self.assertRaises(ValueError) as err:
-            self.ocfs2_inst3._find_target_on_join("node1")
-        self.assertEqual("Filesystem require configure device", str(err.exception))
-        mock_run.assert_called_once_with("crm configure show", "node1")
-
-    @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
-    def test_find_target_on_join(self, mock_run):
-        mock_run.return_value = """
-params directory="/srv/clusterfs" fstype=ocfs2 device="/dev/sda2"
-        """
-        res = self.ocfs2_inst3._find_target_on_join("node1")
-        self.assertEqual(res, "/dev/sda2")
-        mock_run.assert_called_once_with("crm configure show", "node1")
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._find_target_on_join')
-    def test_join_ocfs2_return(self, mock_find):
-        mock_find.return_value = None
-        self.ocfs2_inst3.join_ocfs2("node1")
-        mock_find.assert_called_once_with("node1")
-
-    @mock.patch('crmsh.utils.compare_uuid_with_peer_dev')
-    @mock.patch('crmsh.utils.is_dev_a_plain_raw_disk_or_partition')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_packages')
-    @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
-    @mock.patch('crmsh.log.LoggerUtils.status_long')
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._find_target_on_join')
-    def test_join_ocfs2(self, mock_find, mock_long, mock_parser, mock_verify_packages, mock_is_mapper, mock_compare):
-        mock_find.return_value = "/dev/sda2"
-        mock_parser("node1").is_resource_configured.return_value = False
-        mock_is_mapper.return_value = True
-        self.ocfs2_inst3.join_ocfs2("node1")
-        mock_find.assert_called_once_with("node1")
-        mock_verify_packages.assert_called_once_with(False)
-        mock_is_mapper.assert_called_once_with("/dev/sda2", "node1")
-        mock_compare.assert_called_once_with(["/dev/sda2"], "node1")
-
-    @mock.patch('crmsh.ocfs2.OCFS2Manager._static_verify')
-    def test_verify_ocfs2(self, mock_static_verify):
-        context1 = mock.Mock(ocfs2_devices=[])
-        ocfs2.OCFS2Manager.verify_ocfs2(context1)
-        mock_static_verify.assert_called_once_with()
diff --git a/test/unittests/test_report_collect.py b/test/unittests/test_report_collect.py
index 7f57dac501..970465fa16 100644
--- a/test/unittests/test_report_collect.py
+++ b/test/unittests/test_report_collect.py
@@ -380,74 +380,6 @@ def test_collect_ratraces(self, mock_find, mock_mkdirp, mock_copy, mock_logger,
         ])
         mock_logger.debug.assert_called_with(f'Dump RA trace files into {mock_real_path.return_value}')
-
-    @mock.patch('crmsh.report.collect.ShellUtils')
-    def test_lsof_ocfs2_device(self, mock_run):
-        mock_run_inst = mock.Mock()
-        mock_run.return_value = mock_run_inst
-        mount_data = """
-/dev/vda3 on /home type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota)
-tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,size=169544k,nr_inodes=42386,mode=700,inode64)
-/dev/sda7 on /srv/clusterfs type ocfs2 (rw,relatime,heartbeat=non
-        """
-        mock_run_inst.get_stdout_stderr.side_effect = [(0, mount_data, None), (0, "data", None)]
-        res = collect.lsof_ocfs2_device()
-        self.assertEqual(res, "\n\n#=====[ Command ] ==========================#\n# lsof /dev/sda7\ndata")
-        mock_run_inst.get_stdout_stderr.assert_has_calls([
-            mock.call("mount"),
-            mock.call("lsof /dev/sda7")
-            ])
-
-    @mock.patch('crmsh.report.utils.get_cmd_output')
-    @mock.patch('os.path.exists')
-    @mock.patch('shutil.which')
-    def test_ocfs2_commands_output(self, mock_which, mock_exists, mock_run):
-        mock_which.side_effect = [False for i in range(5)] + [True, True]
-        mock_exists.return_value = False
-        mock_run.return_value = "data"
-        res = collect.ocfs2_commands_output()
-        self.assertEqual(res, "\n\n#===== [ Command ] ==========================#\n# mount\ndata")
-
-    @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
-    @mock.patch('crmsh.utils.str2file')
-    @mock.patch('crmsh.report.collect.ShellUtils')
-    def test_collect_ocfs2_info_error(self, mock_run, mock_str2file, mock_debug2):
-        mock_run_inst = mock.Mock()
-        mock_run.return_value = mock_run_inst
-        mock_run_inst.get_stdout_stderr.return_value = (1, None, "error")
-        mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
-        collect.collect_ocfs2_info(mock_ctx_inst)
-        mock_str2file.assert_called_once_with('Failed to run "mounted.ocfs2 -d": error', '/opt/workdir/ocfs2.txt')
-
-    @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
-    @mock.patch('crmsh.utils.str2file')
-    @mock.patch('crmsh.report.collect.ShellUtils')
-    def test_collect_ocfs2_info_no_found(self, mock_run, mock_str2file, mock_debug2):
-        mock_run_inst = mock.Mock()
-        mock_run.return_value = mock_run_inst
-        mock_run_inst.get_stdout_stderr.return_value = (0, "data", None)
-        mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
-        collect.collect_ocfs2_info(mock_ctx_inst)
-        mock_str2file.assert_called_once_with('No ocfs2 partitions found', '/opt/workdir/ocfs2.txt')
-
-    @mock.patch('crmsh.report.utils.real_path')
-    @mock.patch('crmsh.report.collect.ocfs2_commands_output')
-    @mock.patch('crmsh.report.collect.lsof_ocfs2_device')
-    @mock.patch('crmsh.report.collect.dump_D_process')
-    @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger)
-    @mock.patch('crmsh.utils.str2file')
-    @mock.patch('crmsh.report.collect.ShellUtils')
-    def test_collect_ocfs2_info(self, mock_run, mock_str2file, mock_debug2, mock_D, mock_lsof, mock_output, mock_real_path):
-        mock_real_path.return_value = constants.OCFS2_F
-        mock_run_inst = mock.Mock()
-        mock_run.return_value = mock_run_inst
-        mock_run_inst.get_stdout_stderr.return_value = (0, "line1\nline2", None)
-        mock_D.return_value = "data_D\n"
-        mock_lsof.return_value = "data_lsof\n"
-        mock_output.return_value = "data_output\n"
-        mock_ctx_inst = mock.Mock(work_dir="/opt/workdir")
-        collect.collect_ocfs2_info(mock_ctx_inst)
-        mock_str2file.assert_called_once_with('data_D\ndata_lsof\ndata_output\n', '/opt/workdir/ocfs2.txt')
-
     @mock.patch('crmsh.report.utils.real_path')
     @mock.patch('logging.Logger.debug')
     @mock.patch('crmsh.utils.str2file')
@@ -616,3 +548,87 @@ def test_collect_qdevice_info(self, mock_quorum, mock_service, mock_qdevice, moc
         mock_real_path.return_value = "/opt/workdir/qdevice.txt"
         collect.collect_qdevice_info(mock_ctx_inst)
         mock_debug.assert_called_once_with(f"Dump quorum/qdevice/qnetd information into {mock_real_path.return_value}")
+
+    @mock.patch("logging.Logger.error")
+    @mock.patch('crmsh.utils.str2file')
+    @mock.patch("crmsh.report.utils.real_path")
+    @mock.patch("logging.Logger.debug")
+    @mock.patch("crmsh.report.collect.cluster_fs_commands_output")
+    @mock.patch("crmsh.report.collect.lsof_cluster_fs_device")
+    @mock.patch("crmsh.report.collect.dump_D_process")
+    @mock.patch("crmsh.report.collect.ShellUtils")
+    def test_collect_cluster_fs_info(self, mock_run, mock_dump, mock_lsof, mock_cluster, mock_debug, mock_real_path, mock_str2file, mock_error):
+        mock_run_inst = mock.Mock()
+        mock_run.return_value = mock_run_inst
+        mock_run_inst.get_stdout_stderr.side_effect = [
+            (1, None, "error"),
+            (0, "data", None)
+        ]
+        mock_dump.return_value = "dump_data\n"
+        mock_lsof.return_value = "lsof_data\n"
+        mock_cluster.return_value = "cluster_data"
+        mock_ctx_inst = mock.Mock(work_dir="/opt/work")
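+        # real_path is consulted once per filesystem type: the collector now
+        # iterates over OCFS2 and GFS2, writing ocfs2.txt and gfs2.txt.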
+        mock_real_path.side_effect = ["/path/ocfs2.txt", "/path/gfs2.txt"]
+
+        collect.collect_cluster_fs_info(mock_ctx_inst)
+
+        mock_debug.assert_has_calls([
+            mock.call('Dump %s information into %s', 'OCFS2', '/path/ocfs2.txt'),
+            mock.call('Dump %s information into %s', 'GFS2', '/path/gfs2.txt')
+        ])
+        mock_str2file.assert_has_calls([
+            mock.call('Failed to run "mounted.ocfs2 -d": error', '/opt/work/ocfs2.txt'),
+            mock.call('dump_data\nlsof_data\ncluster_data', '/opt/work/gfs2.txt')
+        ])
+
+    @mock.patch('crmsh.report.collect.ShellUtils')
+    def test_dump_D_process_empty(self, mock_run):
+        mock_run_inst = mock.Mock()
+        mock_run.return_value = mock_run_inst
+        mock_run_inst.get_stdout_stderr.return_value = (0, None, None)
+        res = collect.dump_D_process()
+        self.assertEqual(res, "Dump D-state process stack: 0\n")
+
+    @mock.patch('crmsh.report.collect.ShellUtils')
+    def test_dump_D_process(self, mock_run):
+        mock_run_inst = mock.Mock()
+        mock_run.return_value = mock_run_inst
+        mock_run_inst.get_stdout_stderr.side_effect = [
+            (0, "1000", None),
+            (0, "data1", None),
+            (0, "data2", None)
+        ]
+        res = collect.dump_D_process()
+        self.assertEqual(res, "Dump D-state process stack: 1\npid: 1000 comm: data1\ndata2\n\n")
+        mock_run_inst.get_stdout_stderr.assert_has_calls([
+            mock.call("ps aux|awk '$8 ~ /^D/{print $2}'"),
+            mock.call('cat /proc/1000/comm'),
+            mock.call('cat /proc/1000/stack')
+        ])
+
+    @mock.patch('crmsh.report.collect.ShellUtils')
+    def test_lsof_cluster_fs_device(self, mock_run):
+        mock_run_inst = mock.Mock()
+        mock_run.return_value = mock_run_inst
+        mount_data = """
+/dev/vda3 on /home type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota)
+tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,size=169544k,nr_inodes=42386,mode=700,inode64)
+/dev/sda7 on /srv/clusterfs type ocfs2 (rw,relatime,heartbeat=non
+        """
+        mock_run_inst.get_stdout_stderr.side_effect = [(0, mount_data, None), (0, "data", None)]
+        res = collect.lsof_cluster_fs_device("OCFS2")
+        self.assertEqual(res, "\n\n#=====[ Command ] ==========================#\n# lsof /dev/sda7\ndata")
+        mock_run_inst.get_stdout_stderr.assert_has_calls([
+            mock.call("mount"),
+            mock.call("lsof /dev/sda7")
+        ])
+
+    @mock.patch('crmsh.report.utils.get_cmd_output')
+    @mock.patch('os.path.exists')
+    @mock.patch('shutil.which')
+    def test_cluster_fs_commands_output(self, mock_which, mock_exists, mock_run):
+        mock_which.side_effect = [False for i in range(5)] + [True, True]
+        mock_exists.return_value = False
+        mock_run.return_value = "data"
+        res = collect.cluster_fs_commands_output("OCFS2")
+        self.assertEqual(res, "\n\n#===== [ Command ] ==========================#\n# mounted.ocfs2 -f\ndata")
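+        # shutil.which is mocked so most helper binaries appear missing; only
+        # commands whose binaries are found contribute to the collected
+        # output, here yielding the `mounted.ocfs2 -f` section asserted above.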