Add performance metrics collection support to VMAX drivers (#623)
joseph-v authored Aug 10, 2021
1 parent 62dd6f1 commit baa386e
Showing 7 changed files with 957 additions and 428 deletions.
100 changes: 79 additions & 21 deletions delfin/drivers/dell_emc/vmax/client.py
@@ -17,6 +17,7 @@

from delfin import exception
from delfin.common import constants
from delfin.drivers.dell_emc.vmax import constants as consts
from delfin.drivers.dell_emc.vmax import rest, perf_utils

LOG = log.getLogger(__name__)
@@ -164,7 +165,7 @@ def list_storage_pools(self, storage_id):
if int(self.uni_version) < 90:
total_cap = pool_info['total_usable_cap_gb'] * units.Gi
used_cap = pool_info['total_allocated_cap_gb'] * units.Gi
subscribed_cap =\
subscribed_cap = \
pool_info['total_subscribed_cap_gb'] * units.Gi
else:
srp_cap = pool_info['srp_capacity']
@@ -252,7 +253,7 @@ def list_volumes(self, storage_id):
sg = vol['storageGroupId'][0]
sg_info = self.rest.get_storage_group(
self.array_id, self.uni_version, sg)
v['native_storage_pool_id'] =\
v['native_storage_pool_id'] = \
sg_info.get('srp', default_srps[emulation_type])
v['compressed'] = sg_info.get('compression', False)
else:
@@ -386,29 +387,86 @@ def clear_alert(self, sequence_number):
return self.rest.clear_alert(sequence_number, version=self.uni_version,
array=self.array_id)

def get_array_performance_metrics(self, storage_id, start_time, end_time):
def get_storage_metrics(self, storage_id, metrics, start_time, end_time):
"""Get performance metrics."""
try:
# Fetch VMAX Array Performance data from REST client
# TODO :
# Check whether array is registered for performance collection
# in unisphere
perf_data = self.rest.get_array_performance_metrics(
self.array_id, start_time, end_time)
# parse VMAX REST response to metric->values map
metrics_value_map = perf_utils.parse_performance_data(perf_data)
# prepare labels required for array_leval performance data
labels = {'storage_id': storage_id, 'resource_type': 'array'}
# map to unified delifn metrics
delfin_metrics = perf_utils.\
map_array_perf_metrics_to_delfin_metrics(metrics_value_map)
perf_list = self.rest.get_storage_metrics(
self.array_id, metrics, start_time, end_time)

return perf_utils.construct_metrics(storage_id,
consts.STORAGE_METRICS,
consts.STORAGE_CAP,
perf_list)
except Exception:
LOG.error("Failed to get STORAGE metrics for VMAX")
raise

def get_pool_metrics(self, storage_id, metrics, start_time, end_time):
"""Get performance metrics."""
try:
perf_list = self.rest.get_pool_metrics(
self.array_id, metrics, start_time, end_time)

metrics_array = perf_utils.construct_metrics(
storage_id, consts.POOL_METRICS, consts.POOL_CAP, perf_list)

return metrics_array
except Exception:
LOG.error("Failed to get STORAGE POOL metrics for VMAX")
raise

def get_port_metrics(self, storage_id, metrics, start_time, end_time):
"""Get performance metrics."""
try:
be_perf_list, fe_perf_list, rdf_perf_list = \
self.rest.get_port_metrics(self.array_id,
metrics, start_time, end_time)

metrics_array = []
for key in constants.DELFIN_ARRAY_METRICS:
m = constants.metric_struct(name=key, labels=labels,
values=delfin_metrics[key])
metrics_array.append(m)
metrics_list = perf_utils.construct_metrics(
storage_id, consts.BEPORT_METRICS,
consts.PORT_CAP, be_perf_list)
metrics_array.extend(metrics_list)

metrics_list = perf_utils.construct_metrics(
storage_id, consts.FEPORT_METRICS,
consts.PORT_CAP, fe_perf_list)
metrics_array.extend(metrics_list)

metrics_list = perf_utils.construct_metrics(
storage_id, consts.RDFPORT_METRICS,
consts.PORT_CAP, rdf_perf_list)
metrics_array.extend(metrics_list)
return metrics_array
except Exception:
LOG.error("Failed to get PORT metrics for VMAX")
raise

def get_controller_metrics(self, storage_id,
metrics, start_time, end_time):
"""Get performance metrics."""
try:
be_perf_list, fe_perf_list, rdf_perf_list = self.rest.\
get_controller_metrics(self.array_id,
metrics, start_time, end_time)

metrics_array = []
metrics_list = perf_utils.construct_metrics(
storage_id, consts.BEDIRECTOR_METRICS,
consts.CONTROLLER_CAP, be_perf_list)
metrics_array.extend(metrics_list)

metrics_list = perf_utils.construct_metrics(
storage_id, consts.FEDIRECTOR_METRICS,
consts.CONTROLLER_CAP, fe_perf_list)
metrics_array.extend(metrics_list)

metrics_list = perf_utils.construct_metrics(
storage_id, consts.RDFDIRECTOR_METRICS,
consts.CONTROLLER_CAP, rdf_perf_list)
metrics_array.extend(metrics_list)

return metrics_array
except Exception:
LOG.error("Failed to get performance metrics data for VMAX")
LOG.error("Failed to get CONTROLLER metrics for VMAX")
raise
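
To show how these new client methods are expected to plug in, here is a minimal dispatch sketch, not part of this commit. The collect_perf_metrics signature and the resource-type keys ('storage', 'storagePool', 'controller', 'port') are assumptions for illustration only.

def collect_perf_metrics(client, storage_id, resource_metrics,
                         start_time, end_time):
    """Hypothetical driver hook dispatching to the VMAX client methods."""
    metrics = []
    if resource_metrics.get('storage'):
        metrics.extend(client.get_storage_metrics(
            storage_id, resource_metrics['storage'], start_time, end_time))
    if resource_metrics.get('storagePool'):
        metrics.extend(client.get_pool_metrics(
            storage_id, resource_metrics['storagePool'],
            start_time, end_time))
    if resource_metrics.get('controller'):
        metrics.extend(client.get_controller_metrics(
            storage_id, resource_metrics['controller'],
            start_time, end_time))
    if resource_metrics.get('port'):
        metrics.extend(client.get_port_metrics(
            storage_id, resource_metrics['port'], start_time, end_time))
    return metrics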
139 changes: 130 additions & 9 deletions delfin/drivers/dell_emc/vmax/constants.py
@@ -17,12 +17,133 @@
# minimum interval supported by VMAX
VMAX_PERF_MIN_INTERVAL = 5

ARRAY_METRICS = ["HostIOs",
"HostMBWritten",
"ReadResponseTime",
"HostMBReads",
"HostReads",
"HostWrites",
"WriteResponseTime"
]
VMAX_REST_TARGET_URI_ARRAY_PERF = '/performance/Array/metrics'
BEDIRECTOR_METRICS = {
'iops': 'IOs',
'throughput': 'MBs',
'readThroughput': 'MBRead',
'writeThroughput': 'MBWritten',
}
FEDIRECTOR_METRICS = {
'iops': 'HostIOs',
'throughput': 'HostMBs',
}
RDFDIRECTOR_METRICS = {
'iops': 'IOs',
'throughput': 'MBSentAndReceived',
'readThroughput': 'MBRead',
'writeThroughput': 'MBWritten',
'responseTime': 'AverageIOServiceTime',
}
BEPORT_METRICS = {
'iops': 'IOs',
'throughput': 'MBs',
'readThroughput': 'MBRead',
'writeThroughput': 'MBWritten',
}
FEPORT_METRICS = {
'iops': 'IOs',
'throughput': 'MBs',
'readThroughput': 'MBRead',
'writeThroughput': 'MBWritten',
'responseTime': 'ResponseTime',
}
RDFPORT_METRICS = {
'iops': 'IOs',
'throughput': 'MBs',
'readThroughput': 'MBRead',
'writeThroughput': 'MBWritten',
}
POOL_METRICS = {
'iops': 'HostIOs',
'readIops': 'HostReads',
'writeIops': 'HostWrites',
'throughput': 'HostMBs',
'readThroughput': 'HostMBReads',
'writeThroughput': 'HostMBWritten',
'responseTime': 'ResponseTime',
}
STORAGE_METRICS = {
'iops': 'HostIOs',
'readIops': 'HostReads',
'writeIops': 'HostWrites',
'throughput': 'HostMBs',
'readThroughput': 'HostMBReads',
'writeThroughput': 'HostMBWritten',
}

IOPS_DESCRIPTION = {
"unit": "IOPS",
"description": "Input/output operations per second"
}
READ_IOPS_DESCRIPTION = {
"unit": "IOPS",
"description": "Read input/output operations per second"
}
WRITE_IOPS_DESCRIPTION = {
"unit": "IOPS",
"description": "Write input/output operations per second"
}
THROUGHPUT_DESCRIPTION = {
"unit": "MB/s",
"description": "Represents how much data is "
"successfully transferred in MB/s"
}
READ_THROUGHPUT_DESCRIPTION = {
"unit": "MB/s",
"description": "Represents how much data read is "
"successfully transferred in MB/s"
}
WRITE_THROUGHPUT_DESCRIPTION = {
"unit": "MB/s",
"description": "Represents how much data write is "
"successfully transferred in MB/s"
}
RESPONSE_TIME_DESCRIPTION = {
"unit": "ms",
"description": "Average time taken for an IO "
"operation in ms"
}
IO_SIZE_DESCRIPTION = {
"unit": "KB",
"description": "The average size of IO requests in KB"
}
READ_IO_SIZE_DESCRIPTION = {
"unit": "KB",
"description": "The average size of read IO requests in KB"
}
WRITE_IO_SIZE_DESCRIPTION = {
"unit": "KB",
"description": "The average size of write IO requests in KB"
}
STORAGE_CAP = {
"iops": IOPS_DESCRIPTION,
"readIops": READ_IOPS_DESCRIPTION,
"writeIops": WRITE_IOPS_DESCRIPTION,
"throughput": THROUGHPUT_DESCRIPTION,
"readThroughput": READ_THROUGHPUT_DESCRIPTION,
"writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
"responseTime": RESPONSE_TIME_DESCRIPTION,
}
POOL_CAP = {
"iops": IOPS_DESCRIPTION,
"readIops": READ_IOPS_DESCRIPTION,
"writeIops": WRITE_IOPS_DESCRIPTION,
"throughput": THROUGHPUT_DESCRIPTION,
"readThroughput": READ_THROUGHPUT_DESCRIPTION,
"writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
"responseTime": RESPONSE_TIME_DESCRIPTION,
}
CONTROLLER_CAP = {
"iops": IOPS_DESCRIPTION,
"throughput": THROUGHPUT_DESCRIPTION,
"readThroughput": READ_THROUGHPUT_DESCRIPTION,
"writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
"responseTime": RESPONSE_TIME_DESCRIPTION,
}
PORT_CAP = {
"iops": IOPS_DESCRIPTION,
"throughput": THROUGHPUT_DESCRIPTION,
"readThroughput": READ_THROUGHPUT_DESCRIPTION,
"writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
"responseTime": RESPONSE_TIME_DESCRIPTION,
}
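
For reference, a small standalone sketch (not part of the commit) that pairs the delfin metric keys with the Unisphere metric names and units defined above. It assumes the delfin source tree is importable; it only prints the mapping.

from delfin.drivers.dell_emc.vmax import constants as consts

# Print each delfin metric key, its Unisphere counterpart, and its unit.
for delfin_name, unisphere_name in consts.STORAGE_METRICS.items():
    unit = consts.STORAGE_CAP[delfin_name]['unit']
    print(f"{delfin_name:<16} -> {unisphere_name:<14} [{unit}]")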
111 changes: 36 additions & 75 deletions delfin/drivers/dell_emc/vmax/perf_utils.py
@@ -12,88 +12,49 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import time
from delfin.common import constants

from collections import Counter

from delfin.drivers.dell_emc.vmax import constants


def epoch_time_ms_now():
"""Get current time in epoch ms.
:returns: epoch time in milli seconds
"""
ms = int(time.time() * 1000)
return ms


def epoch_time_interval_ago(interval_seconds=constants.VMAX_PERF_MIN_INTERVAL):
"""Get epoch time in milliseconds before an interval
:param interval_seconds: interval in seconds
:returns: epoch time in milliseconds
"""
return int(epoch_time_ms_now() - (interval_seconds * 1000))


def generate_performance_payload(array, start_time, end_time, metrics):
"""Generate request payload for VMAX performance POST request
:param array: symmetrixID
:param start_time: start time for collection
:param end_time: end time for collection
:param metrics: metrics to be collected
:returns: payload dictionary
"""
return {'symmetrixId': str(array),
"endDate": end_time,
"startDate": start_time,
"metrics": metrics,
"dataFormat": "Average"}


def parse_performance_data(response):
def parse_performance_data(metrics):
"""Parse metrics response to a map
:param response: response from unispshere REST API
:param metrics: metrics from Unisphere REST API
:returns: map with key as metric name and value as dictionary
containing {timestamp: value} for the timestamps available
"""
metrics_map = {}
for metrics in response["resultList"]["result"]:
timestamp = metrics["timestamp"]
for key, value in metrics.items():
metrics_map[key] = metrics_map.get(key, {})
metrics_map[key][timestamp] = value
timestamp = metrics["timestamp"]
for key, value in metrics.items():
metrics_map[key] = metrics_map.get(key, {})
metrics_map[key][timestamp] = value
return metrics_map


def map_array_perf_metrics_to_delfin_metrics(metrics_value_map):
"""map vmax array performance metrics values to delfin metrics values
:param metrics_value_map: metric to values map of vmax metrics
:returns: map with key as delfin metric name and value as dictionary
containing {timestamp: value} for a the timestamps available
"""
# read and write response_time
read_response_values_dict = metrics_value_map.get('ReadResponseTime')
write_response_values_dict = metrics_value_map.get('WriteResponseTime')
if read_response_values_dict or write_response_values_dict:
response_time_values_dict = \
Counter(read_response_values_dict) + \
Counter(write_response_values_dict)
# bandwidth metrics
read_bandwidth_values_dict = metrics_value_map.get('HostMBReads')
write_bandwidth_values_dict = metrics_value_map.get('HostMBWritten')
if read_bandwidth_values_dict or write_bandwidth_values_dict:
bandwidth_values_dict = \
Counter(read_bandwidth_values_dict) +\
Counter(write_bandwidth_values_dict)
throughput_values_dict = metrics_value_map.get('HostIOs')
read_throughput_values_dict = metrics_value_map.get('HostReads')
write_throughput_values_dict = metrics_value_map.get('HostWrites')
# map values to delfin metrics spec
delfin_metrics = {'responseTime': response_time_values_dict,
'readThroughput': read_bandwidth_values_dict,
'writeThroughput': write_bandwidth_values_dict,
'requests': throughput_values_dict,
'readRequests': read_throughput_values_dict,
'writeRequests': write_throughput_values_dict,
'throughput': bandwidth_values_dict}
return delfin_metrics
def construct_metrics(storage_id, resource_metrics, unit_map, perf_list):
metrics_list = []
metrics_values = {}
for perf in perf_list:
collected_metrics_list = perf.get('metrics')
for collected_metrics in collected_metrics_list:
metrics_map = parse_performance_data(collected_metrics)

for key, value in resource_metrics.items():
metrics_map_value = metrics_map.get(value)
if metrics_map_value:
metrics_values[key] = metrics_values.get(key, {})
for k, v in metrics_map_value.items():
metrics_values[key][k] = v

for resource_key, resource_value in metrics_values.items():
labels = {
'storage_id': storage_id,
'resource_type': perf.get('resource_type'),
'resource_id': perf.get('resource_id'),
'resource_name': perf.get('resource_name'),
'type': 'RAW',
'unit': unit_map[resource_key]['unit']
}
metrics_res = constants.metric_struct(name=resource_key,
labels=labels,
values=resource_value)
metrics_list.append(metrics_res)
return metrics_list
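
A self-contained usage sketch of the two helpers above. The entry shape mirrors what construct_metrics reads from perf_list (resource_type, resource_id, resource_name, metrics keyed by Unisphere metric names per timestamp); the array ID, timestamps, and values are invented sample data.

from delfin.drivers.dell_emc.vmax import constants as consts
from delfin.drivers.dell_emc.vmax import perf_utils

sample_perf_list = [{
    'resource_type': 'storage',
    'resource_id': '000197900049',    # invented array ID
    'resource_name': 'VMAX-sample',   # invented display name
    'metrics': [
        # one dict per timestamp, keyed by Unisphere metric names
        {'timestamp': 1628580000000, 'HostIOs': 120.0, 'HostMBs': 35.5},
        {'timestamp': 1628580300000, 'HostIOs': 130.0, 'HostMBs': 40.2},
    ],
}]

# Yields delfin metric_struct entries for 'iops' and 'throughput',
# labeled with the units from STORAGE_CAP.
metrics = perf_utils.construct_metrics(
    'storage-uuid-1', consts.STORAGE_METRICS, consts.STORAGE_CAP,
    sample_perf_list)
for m in metrics:
    print(m)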