diff --git a/common.json b/common.json
index c538b0436924..3bc65d0842d5 100644
--- a/common.json
+++ b/common.json
@@ -4,7 +4,7 @@
     "Jsonnet files should not include this file directly but use ci/common.jsonnet instead."
   ],
 
-  "mx_version": "7.27.5.1",
+  "mx_version": "7.27.5.3",
 
   "COMMENT.jdks": "When adding or removing JDKs keep in sync with JDKs in ci/common.jsonnet",
   "jdks": {
diff --git a/compiler/ci/ci_common/benchmark-builders.jsonnet b/compiler/ci/ci_common/benchmark-builders.jsonnet
index 2a16e6cacd30..8892dce6dc08 100644
--- a/compiler/ci/ci_common/benchmark-builders.jsonnet
+++ b/compiler/ci/ci_common/benchmark-builders.jsonnet
@@ -14,6 +14,7 @@
     c.daily + c.opt_post_merge + hw.e3 + jdk + cc.libgraal + bench.dacapo + PR_bench_libgraal,
     c.daily + c.opt_post_merge + hw.e3 + jdk + cc.libgraal + bench.scala_dacapo + PR_bench_libgraal,
     c.daily + c.opt_post_merge + hw.e3 + jdk + cc.libgraal + bench.renaissance + PR_bench_libgraal,
+    c.daily + c.opt_post_merge + hw.e3 + jdk + cc.libgraal + bench.barista,
     c.daily + c.opt_post_merge + hw.e3 + jdk + cc.libgraal + bench.specjvm2008 + PR_bench_libgraal,
     c.on_demand + hw.e3 + jdk + cc.libgraal + bench.dacapo_size_variants,
     c.on_demand + hw.e3 + jdk + cc.libgraal + bench.scala_dacapo_size_variants,
diff --git a/compiler/ci/ci_common/benchmark-suites.libsonnet b/compiler/ci/ci_common/benchmark-suites.libsonnet
index 8ec21e488619..8042c3ade698 100644
--- a/compiler/ci/ci_common/benchmark-suites.libsonnet
+++ b/compiler/ci/ci_common/benchmark-suites.libsonnet
@@ -8,7 +8,7 @@
   // convenient sets of benchmark suites for easy reuse
   groups:: {
-    open_suites:: unique_suites([$.awfy, $.dacapo, $.scala_dacapo, $.renaissance]),
+    open_suites:: unique_suites([$.awfy, $.dacapo, $.scala_dacapo, $.renaissance, $.barista]),
     spec_suites:: unique_suites([$.specjvm2008, $.specjbb2015]),
     jmh_micros_suites:: unique_suites([$.micros_graal_dist]),
     graal_internals_suites:: unique_suites([$.micros_graal_whitebox]),
@@ -112,6 +112,47 @@
   renaissance: self.renaissance_template(),
 
+  barista_template(suite_version=null, suite_name="barista", max_jdk_version=null, cmd_app_prefix=["hwloc-bind --cpubind node:0.core:0-3.pu:0 --membind node:0"], non_prefix_barista_args=[]):: cc.compiler_benchmark + {
+    suite:: suite_name,
+    local barista_version = "v0.2.0",
+    local suite_version_args = if suite_version != null then ["--bench-suite-version=" + suite_version] else [],
+    local prefix_barista_arg = if std.length(cmd_app_prefix) > 0 then [std.format("--cmd-app-prefix=%s", std.join(" ", cmd_app_prefix))] else [],
+    local all_barista_args = prefix_barista_arg + non_prefix_barista_args,
+    local barista_args_with_separator = if std.length(all_barista_args) > 0 then ["--"] + all_barista_args else [],
+    downloads+: {
+      "WRK": { "name": "wrk", "version": "a211dd5", platformspecific: true},
+      "WRK2": { "name": "wrk2", "version": "2.1", platformspecific: true},
+      "BARISTA_BENCHMARKS": { "name": "barista", "version": "0.2.0"}
+    },
+    packages+: {
+      maven: "==3.8.6",
+      "pip:toml": "==0.10.2"
+    },
+    setup: [
+      ["set-export", "PATH", "$WRK:$PATH"],
+      ["set-export", "PATH", "$WRK2:$PATH"],
+      ["git", "clone", "--depth", "1", "--branch", barista_version, ["mx", "urlrewrite", "https://github.com/graalvm/barista-suite.git"], "$BARISTA_HOME"],
+      ["cp", "-r", "$BARISTA_BENCHMARKS/*", "$BARISTA_HOME"] // copy the prebuilt jar/nib files
+    ] + super.setup,
+    run+: [
+      self.benchmark_cmd + ["barista:*"] + suite_version_args + ["--"] + self.extra_vm_args + barista_args_with_separator
+    ],
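+    // The run entry above expands to roughly (illustrative):
+    //   mx benchmark barista:* [--bench-suite-version=<v>] -- <extra VM args> -- --cmd-app-prefix=<prefix> <non-prefix barista args>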
["andrija.kolic@oracle.com"], + timelimit: "1:20:00", + should_use_hwloc: false, // hwloc-bind is passed to barista with '--cmd-app-prefix' + environment+: { + BARISTA_HOME: "$BUILD_DIR/barista-suite", + XMX: "500m" + }, + min_jdk_version:: 8, + max_jdk_version:: max_jdk_version, + forks_batches:: 3, + bench_forks_per_batch:: 4, + forks_timelimit:: "3:30:00" + }, + + barista: self.barista_template(), + specjbb2015: cc.compiler_benchmark + c.heap.large_with_large_young_gen + bc.bench_max_threads + { suite:: "specjbb2015", downloads+: { diff --git a/sdk/mx.sdk/mx_sdk_benchmark.py b/sdk/mx.sdk/mx_sdk_benchmark.py index a4c1c3a5606a..d591af3dee86 100644 --- a/sdk/mx.sdk/mx_sdk_benchmark.py +++ b/sdk/mx.sdk/mx_sdk_benchmark.py @@ -84,6 +84,10 @@ lambda args: mx_benchmark.benchmark(["specjbb2015"] + args), '[-- [VM options] [-- [SPECjbb2015 options]]]' ], + 'barista': [ + lambda args: createBenchmarkShortcut("barista", args), + '[|*] [-- [VM options] [-- [Barista harness options]]]' + ], 'renaissance': [ lambda args: createBenchmarkShortcut("renaissance", args), '[|*] [-- [VM options] [-- [Renaissance options]]]' @@ -1182,6 +1186,372 @@ def rules(self, out, benchmarks, bmSuiteArgs): mx_benchmark.add_bm_suite(SpecJbb2015BenchmarkSuite()) +_baristaConfig = { + "benchmarks": { + "micronaut-hello-world": {}, + "micronaut-shopcart": {}, + "micronaut-similarity": {}, + "quarkus-hello-world": {}, + "quarkus-tika-odt": { + "barista-bench-name": "quarkus-tika", + }, + "quarkus-tika-pdf": { + "barista-bench-name": "quarkus-tika", + "workload": "pdf-workload.barista.json", + }, + "spring-hello-world": {}, + "spring-petclinic": {}, + }, + "latency_percentiles": [50.0, 75.0, 90.0, 99.0, 99.9, 99.99, 99.999, 100.0], + "rss_percentiles": [100, 99, 98, 97, 96, 95, 90, 75, 50, 25], + "disable_trackers": [mx_benchmark.RssTracker, mx_benchmark.PsrecordTracker, mx_benchmark.PsrecordMaxrssTracker, mx_benchmark.RssPercentilesTracker, mx_benchmark.RssPercentilesAndMaxTracker], +} + +class BaristaBenchmarkSuite(mx_benchmark.CustomHarnessBenchmarkSuite): + """Barista benchmark suite implementation. A collection of microservice workloads running on the Barista harness. + + The run arguments are passed to the Barista harness. + If you want to run something like `hwloc-bind` or `taskset` prefixed before the app, you should use the '--cmd-app-prefix' Barista harness option. + If you want to pass options to the app, you should use the '--app-args' Barista harness option. 
+ """ + def __init__(self, custom_harness_command: mx_benchmark.CustomHarnessCommand = None): + if custom_harness_command is None: + custom_harness_command = BaristaBenchmarkSuite.BaristaCommand() + super().__init__(custom_harness_command) + self._version = None + self._context = None + + @property + def context(self): + return self._context + + @context.setter + def context(self, value): + self._context = value + + def readBaristaVersionFromPyproject(self): + # tomllib was included in python standard library with version 3.11 + try: + import tomllib + with open(self.baristaProjectConfigurationPath(), mode="rb") as pyproject: + return tomllib.load(pyproject)["project"]["version"] + except ImportError: + pass + + # fallback to 'toml' library if tomllib is not present + try: + import toml + with open(self.baristaProjectConfigurationPath(), mode="rt") as pyproject: + return toml.loads(pyproject.read())["project"]["version"] + except ImportError: + mx.warn("Could not read the Barista version from the project's `pyproject.toml` file because there is no toml parser installed. Use python3.11+ or install `toml` with pip.") + return self.defaultSuiteVersion() + + def version(self): + if self._version is None: + self._version = self.readBaristaVersionFromPyproject() + return self._version + + def name(self): + return "barista" + + def group(self): + return "Graal" + + def subgroup(self): + return "graal-compiler" + + def benchmarkList(self, bmSuiteArgs): + return self.completeBenchmarkList(bmSuiteArgs) + + def completeBenchmarkList(self, bmSuiteArgs): + return _baristaConfig["benchmarks"].keys() + + def baristaDirectoryPath(self): + barista_home = mx.get_env("BARISTA_HOME") + if barista_home is None or not os.path.isdir(barista_home): + mx.abort("Please set the BARISTA_HOME environment variable to a " + + "Barista benchmark suite directory.") + return barista_home + + def baristaFilePath(self, file_name): + barista_home = self.baristaDirectoryPath() + file_path = os.path.abspath(os.path.join(barista_home, file_name)) + if not os.path.isfile(file_path): + raise FileNotFoundError("The BARISTA_HOME environment variable points to a directory " + + f"that does not contain a '{file_name}' file.") + return file_path + + def baristaProjectConfigurationPath(self): + return self.baristaFilePath("pyproject.toml") + + def baristaBuilderPath(self): + return self.baristaFilePath("build") + + def baristaHarnessPath(self): + return self.baristaFilePath("barista") + + def baristaHarnessBenchmarkName(self): + return _baristaConfig["benchmarks"][self.context.benchmark].get("barista-bench-name", self.context.benchmark) + + def baristaHarnessBenchmarkWorkload(self): + return _baristaConfig["benchmarks"][self.context.benchmark].get("workload") + + def validateEnvironment(self): + self.baristaProjectConfigurationPath() + self.baristaHarnessPath() + + def register_tracker(self, name, tracker_type): + if tracker_type in _baristaConfig["disable_trackers"]: + mx.log(f"Ignoring the registration of '{name}' tracker as it was disabled for {self.__class__.__name__}.") + return + super().register_tracker(name, tracker_type) + + def createCommandLineArgs(self, benchmarks, bmSuiteArgs): + # Pass the VM options, BaristaCommand will form the final command. 
+        return self.vmArgs(bmSuiteArgs)
+
+    def all_command_line_args_are_vm_args(self):
+        return True
+
+    def rules(self, out, benchmarks, bmSuiteArgs):
+        json_file_group_name = "barista_json_results_file_path"
+        json_file_pattern = fr"Saving all collected metrics to JSON file: (?P<{json_file_group_name}>\S+?)$"
+        all_rules = []
+
+        # Startup
+        all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
+            "benchmark": self.context.benchmark,
+            "metric.name": "request-time",
+            "metric.type": "numeric",
+            "metric.unit": "ms",
+            "metric.value": ("<startup.measurements.response_time>", float),
+            "metric.better": "lower",
+            "metric.iteration": ("<startup.measurements.iteration>", int),
+            "load-tester.id": ("<startup.id>", str),
+            "load-tester.method-type": "requests"
+        }, ["startup.id", "startup.measurements.iteration", "startup.measurements.response_time"]))
+
+        # Warmup
+        all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
+            "benchmark": self.context.benchmark,
+            "metric.name": "warmup",
+            "metric.type": "numeric",
+            "metric.unit": "op/s",
+            "metric.value": ("<warmup.measurements.throughput>", float),
+            "metric.better": "higher",
+            "metric.iteration": ("<warmup.measurements.iteration>", int),
+            "load-tester.id": ("<warmup.id>", str),
+            "load-tester.command": ("<warmup.measurements.command>", str)
+        }, ["warmup.id", "warmup.measurements.iteration", "warmup.measurements.throughput", "warmup.measurements.command"]))
+
+        # Throughput
+        all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
+            "benchmark": self.context.benchmark,
+            "metric.name": "throughput",
+            "metric.type": "numeric",
+            "metric.unit": "op/s",
+            "metric.value": ("<throughput.measurements.throughput>", float),
+            "metric.better": "higher",
+            "metric.iteration": ("<throughput.measurements.iteration>", int),
+            "load-tester.id": ("<throughput.id>", str),
+            "load-tester.command": ("<throughput.measurements.command>", str)
+        }, ["throughput.id", "throughput.measurements.iteration", "throughput.measurements.throughput", "throughput.measurements.command"]))
+
+        # Latency
+        all_rules += [mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
+            "benchmark": self.context.benchmark,
+            "metric.name": "latency",
+            "metric.type": "numeric",
+            "metric.unit": "ms",
+            "metric.value": (f"<latency__measurements__final_measurements__p_values__{float(percentile)}>", float),
+            "metric.percentile": float(percentile),
+            "metric.better": "lower",
+            "metric.iteration": ("<latency__measurements__final_measurements__iteration>", int),
+            "load-tester.id": ("<latency__id>", str),
+            "load-tester.command": ("<latency__measurements__final_measurements__command>", str)
+        }, [
+            "latency__id",
+            "latency__measurements__final_measurements__iteration",
+            f"latency__measurements__final_measurements__p_values__{float(percentile)}",
+            "latency__measurements__final_measurements__command"
+        ], indexer_str="__") for percentile in _baristaConfig["latency_percentiles"]]
+
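+        # As with latency above, each RSS percentile below is emitted as a separate
+        # datapoint, distinguished by the "metric.percentile" dimension.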
"lower", + }, [f"resource_usage__rss__p{percentile_to_copy_into_max_rss}"], indexer_str="__")) + + return all_rules + + def validateStdoutWithDimensions(self, out, benchmarks, bmSuiteArgs, retcode=None, dims=None, extraRules=None) -> DataPoints: + datapoints = super().validateStdoutWithDimensions(out, benchmarks, bmSuiteArgs, retcode=retcode, dims=dims, extraRules=extraRules) + for datapoint in datapoints: + # Expand the 'load-tester' field group + if "load-tester.command" in datapoint: + command = datapoint["load-tester.command"].split() + + if command[0] == "wrk": + datapoint["load-tester.method-type"] = "throughput" + else: + datapoint["load-tester.method-type"] = "latency" + datapoint["load-tester.options"] = ' '.join(command[1:]) + if "-R" in command: + datapoint["load-tester.rate"] = int(command[command.index("-R") + 1]) + if "-c" in command: + datapoint["load-tester.connections"] = int(command[command.index("-c") + 1]) + if "-t" in command: + datapoint["load-tester.threads"] = int(command[command.index("-t") + 1]) + + del datapoint["load-tester.command"] + return datapoints + + def _vmRun(self, vm, workdir, command, benchmarks, bmSuiteArgs): + self.enforce_single_benchmark(benchmarks) + self.context = BaristaBenchmarkSuite.RuntimeContext(self, vm, benchmarks[0], bmSuiteArgs) + return super()._vmRun(vm, workdir, command, benchmarks, bmSuiteArgs) + + def enforce_single_benchmark(self, benchmarks): + if not isinstance(benchmarks, list): + raise TypeError(f"{self.__class__.__name__} expects to receive a list of benchmarks to run, instead got an instance of {benchmarks.__class__.__name__}! Please specify a single benchmark!") + if len(benchmarks) != 1: + raise ValueError(f"You have requested {benchmarks} to be run but {self.__class__.__name__} can only run a single benchmark at a time! Please specify a single benchmark!") + + class BaristaCommand(mx_benchmark.CustomHarnessCommand): + """Maps a JVM command into a command tailored for the Barista harness. + """ + def _regexFindInCommand(self, cmd, pattern): + """Searches through the words of a command for a regex pattern. + + :param list[str] cmd: Command to search through. + :param str pattern: Regex pattern to search for. + :return: The match if one is found, None otherwise. + :rtype: re.Match + """ + for word in cmd: + m = re.search(pattern, word) + if m: + return m + return None + + def _updateCommandOption(self, cmd, option_name, option_short_name, new_value): + """Updates command option value, concatenates the new value with the existing one, if it is present. + + :param list[str] cmd: Command to be updated. + :param str option_name: Name of the option to be updated. + :param str option_short_name: Short name of the option to be updated. + :param str new_value: New value for the option, to be concatenated to the existing value, if it is present. + :return: Updated command. + :rtype: list[str] + """ + option_pattern = f"^(?:{option_name}=|{option_short_name}=)(.+)$" + existing_option_match = self._regexFindInCommand(cmd, option_pattern) + if existing_option_match: + cmd.remove(existing_option_match.group(0)) + new_value = f"{new_value} {existing_option_match.group(1)}" + cmd.append(f"{option_name}={new_value}") + + def produceHarnessCommand(self, cmd, suite): + """Maps a JVM command into a command tailored for the Barista harness. + + :param list[str] cmd: JVM command to be mapped. + :param BaristaBenchmarkSuite suite: Barista benchmark suite running the benchmark on the Barista harness. 
+            :return: Command tailored for the Barista harness.
+            :rtype: list[str]
+            """
+            if not isinstance(suite, BaristaBenchmarkSuite):
+                raise TypeError(f"Expected an instance of {BaristaBenchmarkSuite.__name__}, instead got an instance of {suite.__class__.__name__}")
+            jvm_cmd = cmd
+
+            # Extract the path to the JVM distribution from the JVM command
+            java_exe_pattern = os.path.join("^(.*)", "bin", "java$")
+            java_exe_match = self._regexFindInCommand(jvm_cmd, java_exe_pattern)
+            if not java_exe_match:
+                raise ValueError(f"Could not find the path to the java executable in: {jvm_cmd}")
+
+            # Extract VM options and command prefix from the JVM command
+            index_of_java_exe = jvm_cmd.index(java_exe_match.group(0))
+            jvm_cmd_prefix = jvm_cmd[:index_of_java_exe]
+            jvm_vm_options = jvm_cmd[index_of_java_exe + 1:]
+
+            # Verify that the run arguments don't already contain a "--mode" option
+            run_args = suite.runArgs(suite.context.bmSuiteArgs)
+            mode_pattern = r"^(?:-m|--mode)(=.*)?$"
+            mode_match = self._regexFindInCommand(run_args, mode_pattern)
+            if mode_match:
+                raise ValueError(f"You should not set the Barista '--mode' option manually! Found '{mode_match.group(0)}' in the run arguments!")
+
+            # Get bench name and workload to use in the barista harness - we might have custom named benchmarks that need to be mapped
+            barista_bench_name = suite.baristaHarnessBenchmarkName()
+            barista_workload = suite.baristaHarnessBenchmarkWorkload()
+
+            # Construct the Barista command
+            barista_cmd = [suite.baristaHarnessPath()]
+            barista_cmd.append(f"--java-home={java_exe_match.group(1)}")
+            if barista_workload is not None:
+                barista_cmd.append(f"--config={barista_workload}")
+            barista_cmd += run_args
+            if jvm_vm_options:
+                self._updateCommandOption(barista_cmd, "--vm-options", "-v", " ".join(jvm_vm_options))
+            if jvm_cmd_prefix:
+                self._updateCommandOption(barista_cmd, "--cmd-app-prefix", "-p", " ".join(jvm_cmd_prefix))
+            barista_cmd += ["--mode", "jvm"]
+            barista_cmd.append(barista_bench_name)
+            return barista_cmd
+
+    class RuntimeContext():
+        """Container class for the runtime context of BaristaBenchmarkSuite.
+        """
+        def __init__(self, suite, vm, benchmark, bmSuiteArgs):
+            if not isinstance(suite, BaristaBenchmarkSuite):
+                raise TypeError(f"Expected an instance of {BaristaBenchmarkSuite.__name__}, instead got an instance of {suite.__class__.__name__}")
+            self._suite = suite
+            self._vm = vm
+            self._benchmark = benchmark
+            self._bmSuiteArgs = bmSuiteArgs
+
+        @property
+        def suite(self):
+            return self._suite
+
+        @property
+        def vm(self):
+            return self._vm
+
+        @property
+        def benchmark(self):
+            """The currently running benchmark.
+
+            Corresponds to `benchmarks[0]` in a suite method that has a `benchmarks` argument.
+            """
+            return self._benchmark
+
+        @property
+        def bmSuiteArgs(self):
+            return self._bmSuiteArgs
+
+
+mx_benchmark.add_bm_suite(BaristaBenchmarkSuite())
+
+
 _renaissanceConfig = {
     "akka-uct"         : 24,
     "als"              : 60,
diff --git a/sdk/mx.sdk/suite.py b/sdk/mx.sdk/suite.py
index 3b0b8a817870..3528650433b3 100644
--- a/sdk/mx.sdk/suite.py
+++ b/sdk/mx.sdk/suite.py
@@ -39,7 +39,7 @@ # SOFTWARE.
 #
 
 suite = {
-    "mxversion": "7.27.0",
+    "mxversion": "7.27.5.3",
     "name" : "sdk",
     "version" : "24.1.2",
     "release" : False,
diff --git a/substratevm/mx.substratevm/mx_substratevm_benchmark.py b/substratevm/mx.substratevm/mx_substratevm_benchmark.py
index 26b274855e04..8f4c206bdebf 100644
--- a/substratevm/mx.substratevm/mx_substratevm_benchmark.py
+++ b/substratevm/mx.substratevm/mx_substratevm_benchmark.py
@@ -28,7 +28,9 @@
 import os
 import tempfile
 import zipfile
+import re
 from glob import glob
+from pathlib import Path
 
 import mx
 import mx_benchmark
@@ -264,6 +266,204 @@
 mx_benchmark.add_bm_suite(RenaissanceNativeImageBenchmarkSuite())
 
 
+class BaristaNativeImageBenchmarkSuite(mx_sdk_benchmark.BaristaBenchmarkSuite, mx_sdk_benchmark.NativeImageBenchmarkMixin, mx_sdk_benchmark.NativeImageBundleBasedBenchmarkMixin):
+    """Native Image variant of the Barista benchmark suite implementation. A collection of microservice workloads running in native execution mode on the Barista harness.
+
+    The run arguments are passed to the Barista harness.
+    If you want to run something like `hwloc-bind` or `taskset` prefixed before the app image, you should use the '--cmd-app-prefix' Barista harness option.
+    If you want to pass options to the app image, you should use the '--app-args' Barista harness option.
+    """
+    def __init__(self, custom_harness_command: mx_benchmark.CustomHarnessCommand = None):
+        if custom_harness_command is None:
+            custom_harness_command = BaristaNativeImageBenchmarkSuite.BaristaNativeImageCommand()
+        super().__init__(custom_harness_command)
+        self._application_nibs = {}
+        # because of an issue in handling image build args in the intended order [GR-58214]
+        # we need the image name that is set inside the nib
+        self._application_fixed_image_names = {}
+
+    def name(self):
+        return "barista-native-image"
+
+    def benchSuiteName(self, bmSuiteArgs=None):
+        return "barista"
+
+    def benchmarkName(self):
+        return self.context.benchmark
+
+    def application_nib(self):
+        if self.benchmarkName() not in self._application_nibs:
+            # Run subprocess retrieving the application nib from the Barista 'build' script
+            out = mx.OutputCapture()
+            mx.run([self.baristaBuilderPath(), "--get-nib", self.baristaHarnessBenchmarkName()], out=out)
+            # Capture the application nib from the Barista 'build' script output
+            nib_pattern = r"application nib file path is: ([^\n]+)\n"
+            nib_match = re.search(nib_pattern, out.data)
+            if not nib_match:
+                raise ValueError(f"Could not extract the nib file path from the command output! Expected to match pattern {repr(nib_pattern)}.")
+            # Cache for future access
+            self._application_nibs[self.benchmarkName()] = nib_match.group(1)
+            # Try to capture the fixed image name from the Barista 'build' script output
+            fixed_image_name_pattern = r"fixed image name is: ([^\n]+)\n"
+            fixed_image_name_match = re.search(fixed_image_name_pattern, out.data)
+            # Cache fixed image name, if present
+            if fixed_image_name_match:
+                self._application_fixed_image_names[self.benchmarkName()] = fixed_image_name_match.group(1)
+        return self._application_nibs[self.benchmarkName()]
+
+    def application_fixed_image_name(self):
+        self.application_nib()
+        return self._application_fixed_image_names.get(self.benchmarkName(), None)
+
+    def applicationDist(self):
+        return Path(self.application_nib()).parent
+
+    def uses_bundles(self):
+        return True
+
+    def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
+        # Pass the VM options, BaristaNativeImageCommand will form the final command.
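+        # The non-VM run arguments are consumed later by BaristaNativeImageCommand.produceHarnessCommand,
+        # which also injects the stage-appropriate '-Dnative-image.benchmark.extra-*-arg=' values parsed
+        # from the suite arguments.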
+        return self.vmArgs(bmSuiteArgs)
+
+    def extra_jvm_arg(self, benchmark, args):
+        # Added by BaristaNativeImageCommand
+        return []
+
+    def extra_agent_run_arg(self, benchmark, args, image_run_args):
+        # Added by BaristaNativeImageCommand
+        return []
+
+    def extra_profile_run_arg(self, benchmark, args, image_run_args, should_strip_run_args):
+        # Added by BaristaNativeImageCommand
+        return []
+
+    def extra_run_arg(self, benchmark, args, image_run_args):
+        # Added by BaristaNativeImageCommand
+        return []
+
+    def run(self, benchmarks, bmSuiteArgs) -> mx_benchmark.DataPoints:
+        return self.intercept_run(super(), benchmarks, bmSuiteArgs)
+
+    def ensure_image_is_at_desired_location(self, bmSuiteArgs):
+        if self.stages_info.requested_stage.is_image() and self.application_fixed_image_name() is not None:
+            # Because of an issue in handling image build args in the intended order [GR-58214]
+            # we need to move the image from the path that is set inside the nib to the path expected by our vm.
+            # This code has no effect if the image is already at the desired location.
+            vm = self.get_vm_registry().get_vm_from_suite_args(bmSuiteArgs)
+            if vm.stages_info.should_produce_datapoints(mx_sdk_benchmark.Stage.INSTRUMENT_IMAGE):
+                desired_image_path = vm.config.instrumented_image_path
+            elif vm.stages_info.should_produce_datapoints(mx_sdk_benchmark.Stage.IMAGE):
+                desired_image_path = vm.config.image_path
+            else:
+                return
+            actual_image_path = desired_image_path.parent / self.application_fixed_image_name()
+            if actual_image_path.is_file() and not desired_image_path.is_file():
+                mx.move(actual_image_path, desired_image_path)
+
+    def runAndReturnStdOut(self, benchmarks, bmSuiteArgs):
+        retcode, out, dims = super().runAndReturnStdOut(benchmarks, bmSuiteArgs)
+        self.ensure_image_is_at_desired_location(bmSuiteArgs)
+        return retcode, out, dims
+
+    class BaristaNativeImageCommand(mx_sdk_benchmark.BaristaBenchmarkSuite.BaristaCommand):
+        """Maps the command produced by NativeImageVM into a command tailored for the Barista harness.
+        """
+        def _short_load_testing_phases(self):
+            """Configures the main barista load-testing phases to be quite short.
+
+            Useful for the `agent` and `instrument-run` stages.
+            """
+            return [
+                "--warmup-iteration-count", "1",
+                "--warmup-duration", "5",
+                "--throughput-iteration-count", "0",
+                "--latency-iteration-count", "0",
+            ]
+
+        def _get_built_app_image(self, suite, stage):
+            """Retrieves the path to the app image built in the previous stage.
+
+            In the case of `instrument-run`, retrieves the image built during `instrument-image`.
+            In the case of `run`, retrieves the image built during `image`.
+            """
+            vm = suite.context.vm
+            if stage == mx_sdk_benchmark.Stage.INSTRUMENT_RUN:
+                return vm.config.instrumented_image_path
+            else:
+                return vm.config.image_path
+
+        def produce_JVM_harness_command(self, cmd, suite):
+            """Maps a JVM command into a command tailored for the Barista harness.
+
+            Utilizes the implementation of the ``mx_sdk_benchmark.BaristaBenchmarkSuite.BaristaCommand`` base class.
+            """
+            return super().produceHarnessCommand(cmd, suite)
+
+        def produceHarnessCommand(self, cmd, suite):
+            """Maps a NativeImageVM command into a command tailored for the Barista harness.
+
+            This method is invoked only in the `agent`, `instrument-run` and `run` stages, because hooks are
+            only applied in these stages (defined in ``NativeImageBenchmarkMixin.run_stage``).
+            In the case of the `agent` stage, relies on the parent ``BaristaCommand`` class for the mapping.
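+            In the `instrument-run` and `run` stages, the app image built in the previous stage replaces
+            the JVM invocation and is handed to the harness through the '--app-executable' option.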
+
+            :param list[str] cmd: NativeImageVM command to be mapped.
+            :param BaristaNativeImageBenchmarkSuite suite: Barista benchmark suite running the benchmark on the Barista harness.
+            :return: Command tailored for the Barista harness.
+            :rtype: list[str]
+            """
+            if not isinstance(suite, BaristaNativeImageBenchmarkSuite):
+                raise TypeError(f"Expected an instance of {BaristaNativeImageBenchmarkSuite.__name__}, instead got an instance of {suite.__class__.__name__}")
+
+            stage = suite.stages_info.requested_stage
+            if stage == mx_sdk_benchmark.Stage.AGENT:
+                # BaristaCommand works for agent stage, since it's a JVM stage
+                cmd = self.produce_JVM_harness_command(cmd, suite)
+                # Make agent run short
+                cmd += self._short_load_testing_phases()
+                # Add explicit agent stage args
+                cmd += mx_sdk_benchmark.parse_prefixed_args("-Dnative-image.benchmark.extra-jvm-arg=", suite.context.bmSuiteArgs)
+                cmd += mx_sdk_benchmark.parse_prefixed_args("-Dnative-image.benchmark.extra-agent-run-arg=", suite.context.bmSuiteArgs)
+                return cmd
+
+            # Extract app image options and command prefix from the NativeImageVM command
+            app_image = str(self._get_built_app_image(suite, stage))
+            try:
+                index_of_app_image = cmd.index(app_image)
+            except ValueError:
+                mx.log_error(f"Could not find app image '{app_image}' in {cmd}")
+                raise
+            nivm_cmd_prefix = cmd[:index_of_app_image]
+            nivm_app_options = cmd[index_of_app_image + 1:]
+
+            # Get bench name and workload to use in the barista harness - we might have custom named benchmarks that need to be mapped
+            barista_bench_name = suite.baristaHarnessBenchmarkName()
+            barista_workload = suite.baristaHarnessBenchmarkWorkload()
+
+            # Provide the image built in the previous stage to the Barista harness using the '--app-executable' option
+            ni_barista_cmd = [suite.baristaHarnessPath(), "--mode", "native", "--app-executable", app_image]
+            if barista_workload is not None:
+                ni_barista_cmd.append(f"--config={barista_workload}")
+            ni_barista_cmd += suite.runArgs(suite.context.bmSuiteArgs)
+            ni_barista_cmd += mx_sdk_benchmark.parse_prefixed_args("-Dnative-image.benchmark.extra-jvm-arg=", suite.context.bmSuiteArgs)
+            if stage == mx_sdk_benchmark.Stage.INSTRUMENT_RUN:
+                # Make instrument run short
+                ni_barista_cmd += self._short_load_testing_phases()
+                # Add explicit instrument stage args
+                ni_barista_cmd += mx_sdk_benchmark.parse_prefixed_args("-Dnative-image.benchmark.extra-profile-run-arg=", suite.context.bmSuiteArgs) or mx_sdk_benchmark.parse_prefixed_args("-Dnative-image.benchmark.extra-run-arg=", suite.context.bmSuiteArgs)
+            else:
+                # Add explicit run stage args
+                ni_barista_cmd += mx_sdk_benchmark.parse_prefixed_args("-Dnative-image.benchmark.extra-run-arg=", suite.context.bmSuiteArgs)
+            if nivm_cmd_prefix:
+                self._updateCommandOption(ni_barista_cmd, "--cmd-app-prefix", "-p", " ".join(nivm_cmd_prefix))
+            if nivm_app_options:
+                self._updateCommandOption(ni_barista_cmd, "--app-args", "-a", " ".join(nivm_app_options))
+            ni_barista_cmd += [barista_bench_name]
+            return ni_barista_cmd
+
+
+mx_benchmark.add_bm_suite(BaristaNativeImageBenchmarkSuite())
+
+
 class BaseDaCapoNativeImageBenchmarkSuite():
     '''`SetBuildInfo` method in DaCapo source reads from the file nested in daCapo jar.
diff --git a/substratevm/mx.substratevm/suite.py b/substratevm/mx.substratevm/suite.py
index b9d6a5833bca..c9514c4e8f5c 100644
--- a/substratevm/mx.substratevm/suite.py
+++ b/substratevm/mx.substratevm/suite.py
@@ -1,6 +1,6 @@
 # pylint: disable=line-too-long
 suite = {
-    "mxversion": "7.27.1",
+    "mxversion": "7.27.5.3",
     "name": "substratevm",
     "version" : "24.1.2",
     "release" : False,
diff --git a/vm/mx.vm/mx_vm_benchmark.py b/vm/mx.vm/mx_vm_benchmark.py
index 7c66a2b82895..1610104c8eb9 100644
--- a/vm/mx.vm/mx_vm_benchmark.py
+++ b/vm/mx.vm/mx_vm_benchmark.py
@@ -134,7 +134,7 @@ def __init__(self, vm: NativeImageVM, bm_suite: BenchmarkSuite | NativeImageBenc
         self.bm_suite = bm_suite
         self.benchmark_suite_name = bm_suite.benchSuiteName(args)
         self.benchmark_name = bm_suite.benchmarkName()
-        self.executable, self.classpath_arguments, self.modulepath_arguments, self.system_properties, self.image_vm_args, image_run_args, self.split_run = NativeImageVM.extract_benchmark_arguments(args)
+        self.executable, self.classpath_arguments, self.modulepath_arguments, self.system_properties, self.image_vm_args, image_run_args, self.split_run = NativeImageVM.extract_benchmark_arguments(args, bm_suite.all_command_line_args_are_vm_args())
         self.extra_image_build_arguments: List[str] = bm_suite.extra_image_build_argument(self.benchmark_name, args)
         # use list() to create fresh copies to safeguard against accidental modification
         self.image_run_args = bm_suite.extra_run_arg(self.benchmark_name, args, list(image_run_args))
@@ -720,7 +720,10 @@ def supported_vm_arg_prefixes():
                 '--patch-module', '--boot-class-path', '--source-path', '-cp', '-classpath', '-p']
 
     @staticmethod
-    def _split_vm_arguments(args):
+    def _split_vm_arguments(args, all_args_are_vm_args):
+        if all_args_are_vm_args:
+            return args, [], []
+
         i = 0
         while i < len(args):
             arg = args[i]
@@ -736,7 +739,7 @@ def _split_vm_arguments(args):
         mx.abort('No executable found in args: ' + str(args))
 
     @staticmethod
-    def extract_benchmark_arguments(args):
+    def extract_benchmark_arguments(args, all_args_are_vm_args):
         i = 0
         clean_args = args[:]
         split_run = None
@@ -750,7 +753,7 @@ def extract_benchmark_arguments(args):
             else:
                 i += 1
         clean_args = [x for x in clean_args if "-Dnative-image" not in x]
-        vm_args, executable, image_run_args = NativeImageVM._split_vm_arguments(clean_args)
+        vm_args, executable, image_run_args = NativeImageVM._split_vm_arguments(clean_args, all_args_are_vm_args)
         classpath_arguments = []
         modulepath_arguments = []