diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 43c7c5b1..6f2eafd7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,7 +22,7 @@ jobs: with: java-version: openjdk${{ matrix.java }} - name: Build with Maven - run: mvn -fae -V -B clean test -Dinclude.serverless + run: mvn -fae -V -B clean test -Dinclude.serverless -Dinclude.datagrid - name: Zip Artifacts run: | zip -R artifacts-jvm${{ matrix.java }}.zip 'surefire-reports/*' diff --git a/README.md b/README.md index 5d8ca287..26050b3d 100644 --- a/README.md +++ b/README.md @@ -536,27 +536,37 @@ Container images used in the tests are: ### `infinispan-client` -Verifies the way of sharing cache with Datagrid operator. +Verifies the way of the sharing cache by Datagrid operator and Infinispan cluster and data consistency after failures. -#### Prerequisities +#### Prerequisites - Datagrid operator installed in `datagrid-operator` namespace. This needs cluster-admin rights to install. - The operator supports only single-namespace so it has to watch another well-known namespace `datagrid-cluster`. -This namespace must be created by "qe" user or this user must have access to it because infinispan tests are connecting to it. +This namespace must be created by "qe" user or this user must have access to it because tests are connecting to it. - These namespaces should be prepared after the Openshift installation - See [Installing Data Grid Operator](https://access.redhat.com/documentation/en-us/red_hat_data_grid/8.1/html/running_data_grid_on_openshift/installation) -Tests are creating an infinispan cluster in the `datagrid-cluster` namespace. Cluster is created before tests by `infinispan_cluster_config.yaml`. -To allow parallel runs of tests this cluster must be renamed for every test run - along with configmap `infinispan-config`. The configmap contains +The test suite contains a Maven profile activated using the `include.datagrid`. 
+To execute the Datagrid tests, use the following switch in the Maven command: + +``` +-Dinclude.datagrid +``` + +Tests create an Infinispan cluster in the `datagrid-cluster` namespace. Cluster is created before tests by `infinispan_cluster_config.yaml`. +To allow parallel runs of tests this cluster is renamed for every test run - along with configmap `infinispan-config`. The configmap contains configuration property `quarkus.infinispan-client.server-list`. Value of this property is a path to the infinispan cluster from test namespace, -its structure is `infinispan-cluster-name.datagrid-cluster-namespace.svc.cluster.local:11222`. It is because testsuite using dynamically generated -namespaces for tests. So this path is needed for tests to find infinispan server. +its structure is `infinispan-cluster-name.datagrid-cluster-namespace.svc.cluster.local:11222`. It is because the testsuite uses dynamically generated +namespaces for tests. So this path is needed for the tests to find the Infinispan cluster in another namespace. -The infinispan cluster needs 2 special secrets - tls-secret with TLS certificate and connect-secret with the credentials. +The Infinispan cluster needs 2 special secrets - tls-secret with TLS certificate and connect-secret with the credentials. TLS certificate is a substitution of `secrets/signing-key` in openshift-service-ca namespace, which "qe" user cannot use (doesn't have rights on it). Clientcert secret is generated for "qe" from the tls-secret mentioned above. -Infinispan client test are using the cache directly with @Inject and @RemoteCache. Through the JAX-RS endpoint, we send data into the cache and retrieve it through another JAX-RS endpoint. -The next tests are checking a simple fail-over - first client (application) fail, then Infinispan cluster (cache) fail. Tests kill either the Quarkus pod or Infinispan cluster pod, then wait for redeployment, and check data. 
-For the Quarkus application pod killing is used the same approach as in configmap tests. +Infinispan client tests use the cache directly with `@Inject` and `@RemoteCache`. Through the JAX-RS endpoint, we send data into the cache and retrieve it through another JAX-RS endpoint. +The next tests are checking a simple fail-over - first client (application) fail, then Infinispan cluster (cache) fail. Tests first kill the Quarkus pod, then the Infinispan cluster pod, and then check the data. +For killing the Quarkus application pod, the same approach is used as in the configmap tests. For killing the Infinispan cluster pod, its YAML snippet is updated and applied with zero replicas. +By default, when the Infinispan server is down and the application can't open a connection, it tries to connect again, up to 10 times (max_retries) and gives up after 60s (connect_timeout). +Because of that, we are using the `hotrod-client.properties` file, in which the max_retries and connect_timeout values are reduced. Without this, the application would keep trying to connect to the Infinispan server for the next 10 minutes, and the incremented number could appear later. +The last three tests verify multiple client access to the cache. We simulate the second client by deploying the second deployment config, Service, and Route for these tests. These are copied from the `openshift.yml` file. 
### `security/basic` diff --git a/infinispan-client/src/main/java/io/quarkus/ts/openshift/infinispan/client/InfinispanCounterResource.java b/infinispan-client/src/main/java/io/quarkus/ts/openshift/infinispan/client/InfinispanCounterResource.java index 7edf3cc6..b335f1de 100644 --- a/infinispan-client/src/main/java/io/quarkus/ts/openshift/infinispan/client/InfinispanCounterResource.java +++ b/infinispan-client/src/main/java/io/quarkus/ts/openshift/infinispan/client/InfinispanCounterResource.java @@ -29,7 +29,7 @@ public Integer getCacheCounter() { @Path("/get-client") @GET @Produces(MediaType.TEXT_PLAIN) - public int getClientCounter() { + public Integer getClientCounter() { return counter.get(); } @@ -37,9 +37,10 @@ public int getClientCounter() { @PUT @Produces(MediaType.TEXT_PLAIN) public String incCounters() { - int invocationNumber = counter.incrementAndGet(); - cache.put("counter", cache.get("counter") + 1); - return "Cache=" + cache.get("counter") + " Client=" + invocationNumber; + int invocationClientNumber = counter.incrementAndGet(); + int invocationCacheNumber = cache.get("counter") + 1; + cache.put("counter", invocationCacheNumber); + return "Cache=" + invocationCacheNumber + " Client=" + invocationClientNumber; } @Path("/reset-cache") diff --git a/infinispan-client/src/main/resources/META-INF/hotrod-client.properties b/infinispan-client/src/main/resources/META-INF/hotrod-client.properties new file mode 100644 index 00000000..3d91443f --- /dev/null +++ b/infinispan-client/src/main/resources/META-INF/hotrod-client.properties @@ -0,0 +1,2 @@ +infinispan.client.hotrod.connect_timeout=1000 +infinispan.client.hotrod.max_retries=0 diff --git a/infinispan-client/src/test/java/io/quarkus/ts/openshift/infinispan/client/AbstractInfinispanResourceTest.java b/infinispan-client/src/test/java/io/quarkus/ts/openshift/infinispan/client/AbstractInfinispanResourceTest.java new file mode 100644 index 00000000..dc678cdf --- /dev/null +++ 
b/infinispan-client/src/test/java/io/quarkus/ts/openshift/infinispan/client/AbstractInfinispanResourceTest.java @@ -0,0 +1,268 @@ +package io.quarkus.ts.openshift.infinispan.client; + +import io.fabric8.kubernetes.api.model.HasMetadata; +import io.fabric8.kubernetes.api.model.KubernetesList; +import io.fabric8.kubernetes.api.model.Service; +import io.fabric8.kubernetes.client.utils.Serialization; +import io.fabric8.openshift.api.model.DeploymentConfig; +import io.fabric8.openshift.api.model.Route; +import io.fabric8.openshift.client.OpenShiftClient; +import io.quarkus.ts.openshift.app.metadata.AppMetadata; +import io.quarkus.ts.openshift.common.Command; +import io.quarkus.ts.openshift.common.CustomizeApplicationDeployment; +import io.quarkus.ts.openshift.common.injection.TestResource; +import io.quarkus.ts.openshift.common.util.AwaitUtil; +import io.quarkus.ts.openshift.common.util.OpenShiftUtil; +import org.junit.jupiter.api.AfterAll; + +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static io.restassured.RestAssured.when; +import static org.awaitility.Awaitility.await; +import static org.hamcrest.CoreMatchers.is; + +public abstract class AbstractInfinispanResourceTest { + protected static final String ORIGIN_CLUSTER_NAME = "totally-random-infinispan-cluster-name"; + protected static final String CLUSTER_CONFIG_PATH = "target/test-classes/infinispan_cluster_config.yaml"; + protected static final String CLUSTER_CONFIGMAP_PATH = "target/test-classes/infinispan_cluster_configmap.yaml"; + protected static final String CONNECT_SECRET = "target/test-classes/connect_secret.yaml"; + protected static final String TLS_SECRET = 
"target/test-classes/tls_secret.yaml"; + + protected static final String CLUSTER_NAMESPACE_NAME = "datagrid-cluster"; + protected static final String SECOND_CLIENT_APPLICATION_NAME = "another-infinispan-client"; + protected static final String SECOND_CLIENT_DEPLOYMENT_CONFIG = "target/test-classes/deployment_config_second_client.yaml"; + protected static String NEW_CLUSTER_NAME = null; + + @TestResource + protected AppMetadata metadata; + + @TestResource + protected OpenShiftUtil openshift; + + @TestResource + protected AwaitUtil await; + + @TestResource + protected URL appUrl; + + /** + * Application deployment is performed by the Quarkus Kubernetes extension during test execution. + * Creating an infinispan cluster, its secrets, setting the path to it for the application, and deploying the second app. + * + * @param oc + * @param metadata + * @throws IOException + * @throws InterruptedException + */ + @CustomizeApplicationDeployment + public static void deploy(OpenShiftClient oc, AppMetadata metadata) throws IOException, InterruptedException { + new Command("oc", "apply", "-f", CONNECT_SECRET).runAndWait(); + new Command("oc", "apply", "-f", TLS_SECRET).runAndWait(); + + // there should be unique name for every created infinispan cluster to be able parallel runs + NEW_CLUSTER_NAME = oc.getNamespace() + "-infinispan-cluster"; + + // rename infinispan cluster and configmap + adjustYml(CLUSTER_CONFIG_PATH, ORIGIN_CLUSTER_NAME, NEW_CLUSTER_NAME); + adjustYml(CLUSTER_CONFIGMAP_PATH, ORIGIN_CLUSTER_NAME, NEW_CLUSTER_NAME); + + new Command("oc", "apply", "-f", CLUSTER_CONFIGMAP_PATH).runAndWait(); + new Command("oc", "apply", "-f", CLUSTER_CONFIG_PATH).runAndWait(); + + new Command("oc", "-n", CLUSTER_NAMESPACE_NAME, "wait", "--for", "condition=wellFormed", "--timeout=300s", "infinispan/" + NEW_CLUSTER_NAME).runAndWait(); + + deploySecondInfinispanClient(oc, metadata); + } + + // Undeployment of the second application and infinispan cluster + @AfterAll + public static 
void undeploy() throws IOException, InterruptedException { + new Command("oc", "delete", "-f", SECOND_CLIENT_DEPLOYMENT_CONFIG).runAndWait(); + new Command("oc", "delete", "-f", CLUSTER_CONFIGMAP_PATH).runAndWait(); + new Command("oc", "delete", "-f", CLUSTER_CONFIG_PATH).runAndWait(); + } + + /** + * This method copy the 'openshift.yml' file, changes its name, labels, etc., and deploys it as a second application in OCP. + * For that we need only DeploymentConfig, Service, and Route. + * + * @param oc + * @param metadata + * @throws IOException + * @throws InterruptedException + */ + public static void deploySecondInfinispanClient(OpenShiftClient oc, AppMetadata metadata) throws IOException, InterruptedException { + List objs = oc.load(Files.newInputStream(Paths.get("target/kubernetes/openshift.yml"))).get(); + List necessary_objects = new ArrayList<>(); + + HashMap change = new HashMap<>(); + change.put("app.kubernetes.io/name", SECOND_CLIENT_APPLICATION_NAME); + + for (HasMetadata obj : objs) { + if (obj.getMetadata().getName().equals(metadata.appName)) { + if (obj instanceof DeploymentConfig) { + DeploymentConfig dc = (DeploymentConfig) obj; + dc.getMetadata().setName(SECOND_CLIENT_APPLICATION_NAME); + dc.getMetadata().setLabels(change); + dc.getSpec().setSelector(change); + dc.getSpec().getTemplate().getMetadata().setLabels(change); + necessary_objects.add(dc); + } + + if (obj instanceof Service) { + Service service = (Service) obj; + service.getMetadata().setName(SECOND_CLIENT_APPLICATION_NAME); + service.getSpec().setSelector(change); + necessary_objects.add(service); + } + + if (obj instanceof Route) { + Route route = (Route) obj; + route.getMetadata().setName(SECOND_CLIENT_APPLICATION_NAME); + route.getSpec().getTo().setName(SECOND_CLIENT_APPLICATION_NAME); + route.getSpec().setHost(""); + route.getSpec().setPath(""); + necessary_objects.add(route); + } + } + } + + KubernetesList list = new KubernetesList(); + list.setItems(necessary_objects); + 
Serialization.yamlMapper().writeValue(Files.newOutputStream(Paths.get(new File(SECOND_CLIENT_DEPLOYMENT_CONFIG).getPath())), list); + + new Command("oc", "apply", "-f", SECOND_CLIENT_DEPLOYMENT_CONFIG).runAndWait(); + } + + /** + * Setting the cache counter value to 0 from provided client url address. + * At the end, the cache value is tested that it is actually 0. + * + * @param url + */ + public void resetCacheCounter(String url) { + await().atMost(5, TimeUnit.MINUTES).untilAsserted(() -> { + when() + .put(url) + .then() + .body(is("Cache=0")); + }); + } + + /** + * Setting the client atomic integer counter to 0 in the provided client url address. + * At the end, the client counter value is tested that it is actually 0. + * + * @param url + */ + public void resetClientCounter(String url) { + await().atMost(5, TimeUnit.MINUTES).untilAsserted(() -> { + when() + .put(url) + .then() + .body(is("Client=0")); + }); + } + + /** + * Getting the value of either cache or client counters from the provided url address. + * Tested is only the right returned status code. + * + * @param url + * @return endpoint value as String + */ + public String getCounterValue(String url) { + String actualResponse = + when() + .get(url) + .then().statusCode(200) + .extract().asString(); + + return actualResponse; + } + + /** + * Increasing cache and client counters by 1 from the provided url address. + * + * @param url + * @return increased endpoint value as String + */ + public String fillTheCache(String url) { + String actualResponse = + when() + .put(url) + .then().statusCode(200) + .extract().asString(); + + return actualResponse; + } + + /** + * Increasing cache and client counters by the provided count value from the provided url address. 
+ * + * @param url + * @param count + */ + public void incrementCountersOnValue(String url, int count) { + for (int i = 1; i <= count; i++) { + when() + .put(url) + .then() + .statusCode(200); + } + } + + /** + * Reduces the number of infinispan cluster replicas to 0 and wait for the shutdown condition. It is done by changing + * the YAML file in the target/test-classes directory. + * + * @throws IOException + * @throws InterruptedException + */ + public void killInfinispanCluster() throws IOException, InterruptedException { + adjustYml(CLUSTER_CONFIG_PATH, "replicas: 1", "replicas: 0"); + new Command("oc", "apply", "-f", CLUSTER_CONFIG_PATH).runAndWait(); + new Command("oc", "-n", CLUSTER_NAMESPACE_NAME, "wait", "--for", "condition=gracefulShutdown", "--timeout=300s", "infinispan/" + NEW_CLUSTER_NAME).runAndWait(); + } + + /** + * The number of replicas is increased back to value 1 the same way as in "killInfinispanCluster()" method. The wait command + * expects "wellFormed" condition in Infinispan cluster status. 
+ * + * @throws IOException + * @throws InterruptedException + */ + public void restartInfinispanCluster() throws IOException, InterruptedException { + adjustYml(CLUSTER_CONFIG_PATH, "replicas: 0", "replicas: 1"); + new Command("oc", "apply", "-f", CLUSTER_CONFIG_PATH).runAndWait(); + new Command("oc", "-n", CLUSTER_NAMESPACE_NAME, "wait", "--for", "condition=wellFormed", "--timeout=360s", "infinispan/" + NEW_CLUSTER_NAME).runAndWait(); + } + + /** + * Replacing values in the provided YAML file + * + * @param path + * @param originString + * @param newString + * @throws IOException + */ + public static void adjustYml(String path, String originString, String newString) throws IOException { + Path yamlPath = Paths.get(path); + Charset charset = StandardCharsets.UTF_8; + + String yamlContent = new String(Files.readAllBytes(yamlPath), charset); + yamlContent = yamlContent.replace(originString, newString); + Files.write(yamlPath, yamlContent.getBytes(charset)); + } +} diff --git a/infinispan-client/src/test/java/io/quarkus/ts/openshift/infinispan/client/InfinispanCountersOpenShiftIT.java b/infinispan-client/src/test/java/io/quarkus/ts/openshift/infinispan/client/InfinispanCountersOpenShiftIT.java index a8e9f225..5e83b35a 100644 --- a/infinispan-client/src/test/java/io/quarkus/ts/openshift/infinispan/client/InfinispanCountersOpenShiftIT.java +++ b/infinispan-client/src/test/java/io/quarkus/ts/openshift/infinispan/client/InfinispanCountersOpenShiftIT.java @@ -1,16 +1,10 @@ package io.quarkus.ts.openshift.infinispan.client; -import io.fabric8.openshift.client.OpenShiftClient; -import io.quarkus.ts.openshift.app.metadata.AppMetadata; import io.quarkus.ts.openshift.common.AdditionalResources; -import io.quarkus.ts.openshift.common.Command; -import io.quarkus.ts.openshift.common.CustomizeApplicationDeployment; -import io.quarkus.ts.openshift.common.CustomizeApplicationUndeployment; +import io.quarkus.ts.openshift.common.OnlyIfConfigured; import 
io.quarkus.ts.openshift.common.OpenShiftTest; +import io.quarkus.ts.openshift.common.OpenShiftTestException; import io.quarkus.ts.openshift.common.deploy.UsingQuarkusPluginDeploymentStrategy; -import io.quarkus.ts.openshift.common.injection.TestResource; -import io.quarkus.ts.openshift.common.util.AwaitUtil; -import io.quarkus.ts.openshift.common.util.OpenShiftUtil; import org.hamcrest.Matchers; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Order; @@ -18,155 +12,86 @@ import org.junit.jupiter.api.TestMethodOrder; import java.io.IOException; -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; import java.util.concurrent.TimeUnit; import static io.restassured.RestAssured.when; import static org.awaitility.Awaitility.await; -import static org.hamcrest.CoreMatchers.is; +import static org.junit.jupiter.api.Assertions.assertEquals; @OpenShiftTest(strategy = UsingQuarkusPluginDeploymentStrategy.class) @AdditionalResources("classpath:clientcert_secret.yaml") @TestMethodOrder(MethodOrderer.OrderAnnotation.class) -public class InfinispanCountersOpenShiftIT { - - private static final String ORIGIN_CLUSTER_NAME = "totally-random-infinispan-cluster-name"; - private static final String CLUSTER_CONFIG_PATH = "target/test-classes/infinispan_cluster_config.yaml"; - private static final String CLUSTER_CONFIGMAP_PATH = "target/test-classes/infinispan_cluster_configmap.yaml"; - - private static final String CLUSTER_NAMESPACE_NAME = "datagrid-cluster"; - private static String NEW_CLUSTER_NAME = null; - - @TestResource - private AppMetadata metadata; - - @TestResource - private OpenShiftUtil openshift; - - @TestResource - private AwaitUtil await; - - // Application deployment is performed by the Quarkus Kubernetes extension during test execution. 
- // Creating an infinispan cluster, its secrets and setting the path to it for the application - @CustomizeApplicationDeployment - public static void deploy(OpenShiftClient oc) throws IOException, InterruptedException { - new Command("oc", "apply", "-f", "target/test-classes/connect_secret.yaml").runAndWait(); - new Command("oc", "apply", "-f", "target/test-classes/tls_secret.yaml").runAndWait(); - - // there should be unique name for every created infinispan cluster to be able parallel runs - NEW_CLUSTER_NAME = oc.getNamespace() + "-infinispan-cluster"; - - adjustYml(CLUSTER_CONFIG_PATH, ORIGIN_CLUSTER_NAME, NEW_CLUSTER_NAME); - adjustYml(CLUSTER_CONFIGMAP_PATH, ORIGIN_CLUSTER_NAME, NEW_CLUSTER_NAME); - - new Command("oc", "apply", "-f", CLUSTER_CONFIGMAP_PATH).runAndWait(); - new Command("oc", "apply", "-f", CLUSTER_CONFIG_PATH).runAndWait(); - - new Command("oc", "-n", CLUSTER_NAMESPACE_NAME, "wait", "--for", "condition=wellFormed", "--timeout=300s", "infinispan/" + NEW_CLUSTER_NAME).runAndWait(); - } - - // Undeployment of the application and infinispan cluster - @CustomizeApplicationUndeployment - public static void undeploy() throws IOException, InterruptedException { - new Command("oc", "delete", "-f", CLUSTER_CONFIGMAP_PATH).runAndWait(); - new Command("oc", "delete", "-f", CLUSTER_CONFIG_PATH).runAndWait(); - } - +@OnlyIfConfigured("ts.authenticated-registry") +public class InfinispanCountersOpenShiftIT extends AbstractInfinispanResourceTest { + + /** + * Simple check of connection to endpoints + * + * Expected values = 0 + */ @Test @Order(1) public void testConnectToEndpoints() { - when() - .get("/first-counter/get-cache") - .then() - .statusCode(200) - .body(is("0")); + String firstEndpointCache = getCounterValue(appUrl + "/first-counter/get-cache"); + String secondEndpointCache = getCounterValue(appUrl + "/second-counter/get-cache"); - when() - .get("/second-counter/get-cache") - .then() - .statusCode(200) - .body(is("0")); + 
assertEquals(firstEndpointCache, secondEndpointCache); } + /** + * Test increment counters by 1 + * + * Client counters should be 1 for both endpoints + * Cache counter is shared and should be 2 + */ @Test @Order(2) public void testUpdateCacheOnEndpoints() { - // fill the cache in the first class - when() - .put("/first-counter/increment-counters") - .then() - .statusCode(200) - .body(is("Cache=1 Client=1")); + String firstEndpointCounters = fillTheCache(appUrl + "/first-counter/increment-counters"); + String secondEndpointCounters = fillTheCache(appUrl + "/second-counter/increment-counters"); - // fill the cache in the second class - when() - .put("/second-counter/increment-counters") - .then() - .statusCode(200) - .body(is("Cache=2 Client=1")); - - // check the cache counter - when() - .get("/first-counter/get-cache") - .then() - .statusCode(200) - .body(is("2")); - - when() - .get("/second-counter/get-cache") - .then() - .statusCode(200) - .body(is("2")); - - // check the client counter - when() - .get("/first-counter/get-client") - .then() - .statusCode(200) - .body(is("1")); - - when() - .get("/second-counter/get-client") - .then() - .statusCode(200) - .body(is("1")); + assertEquals("Cache=1 Client=1", firstEndpointCounters); + assertEquals("Cache=2 Client=1", secondEndpointCounters); } + /** + * Client fail-over test. Testing the Quarkus application will connect back to the DataGrid server after restart. + * + * Cache counter should remain the same. 
+ * Client counter is reset to 0 + */ @Test @Order(3) public void testCacheAfterClientsRestart() { - // always start from 0 - resetCounters(); + resetCacheCounter(appUrl + "/first-counter/reset-cache"); + resetClientCounter(appUrl + "/first-counter/reset-client"); + // fill the cache - incrementCountersOnValue(10); + incrementCountersOnValue(appUrl + "/first-counter/increment-counters", 10); // restart the app openshift.rolloutChanges(metadata.appName); - // check the cache counter - when() - .get("/first-counter/get-cache") - .then() - .statusCode(200) - .body(is("10")); + String cacheCounter = getCounterValue(appUrl + "/first-counter/get-cache"); + String clientCounter = getCounterValue(appUrl + "/first-counter/get-client"); - // check the client counter - when() - .get("/first-counter/get-client") - .then() - .statusCode(200) - .body(is("0")); + assertEquals("10", cacheCounter); + assertEquals("0", clientCounter); } + /** + * Client fail-over test. Testing the request to the DataGrid server by the failed Quarkus application. + * + * Cache counter should remain the same. 
+ * Client counter is reset to 0 + */ @Test @Order(4) public void testInvokeWithFailedNode() { - resetCounters(); - incrementCountersOnValue(10); + resetCacheCounter(appUrl + "/first-counter/reset-cache"); + resetClientCounter(appUrl + "/first-counter/reset-client"); + + incrementCountersOnValue(appUrl + "/first-counter/increment-counters", 10); // kill the app = fail of the client int replicas = openshift.countReadyReplicas(metadata.appName); @@ -174,34 +99,40 @@ public void testInvokeWithFailedNode() { // try to invoke the cache when() - .put("/first-counter/increment-counters") + .put(appUrl + "/first-counter/increment-counters") .then() - .statusCode(Matchers.allOf(Matchers.greaterThanOrEqualTo(500),Matchers.lessThan(600))); + .statusCode(Matchers.allOf(Matchers.greaterThanOrEqualTo(500), Matchers.lessThan(600))); // turn-on the app openshift.scale(metadata.appName, replicas); await.awaitAppRoute(); - // check the cache counter - when() - .get("/first-counter/get-cache") - .then() - .statusCode(200) - .body(is("10")); + String cacheCounter = getCounterValue(appUrl + "/first-counter/get-cache"); + String clientCounter = getCounterValue(appUrl + "/first-counter/get-client"); - // check the client counter - when() - .get("/first-counter/get-client") - .then() - .statusCode(200) - .body(is("0")); + assertEquals("10", cacheCounter); + assertEquals("0", clientCounter); } + /** + * Infinispan fail-over test. Testing restart the infinispan cluster in DataGrid operator and wait the Quarkus + * application connects back. The restart is done by reducing the number of infinispan cluster replicas to 0 and it waits + * for the shutdown condition. Then the number of replicas is changed back to 1. + * + * We don't have cache backup in this test case, so the cache is deleted by the restart of infinispan cluster. + * The cache definition "mycache" remains, but the "counter" cache in it is deleted. + * Client counter should remain with the same value after the restart. 
+ * + * @throws IOException + * @throws InterruptedException + */ @Test @Order(5) public void testRestartInfinispanCluster() throws IOException, InterruptedException { - resetCounters(); - incrementCountersOnValue(10); + resetCacheCounter(appUrl + "/first-counter/reset-cache"); + resetClientCounter(appUrl + "/first-counter/reset-client"); + + incrementCountersOnValue(appUrl + "/first-counter/increment-counters", 10); killInfinispanCluster(); restartInfinispanCluster(); @@ -209,23 +140,33 @@ public void testRestartInfinispanCluster() throws IOException, InterruptedExcept // try to connect back to infinispan cluster and expect no content await().atMost(5, TimeUnit.MINUTES).untilAsserted(() -> { when() - .get("/first-counter/get-cache") - .then() + .get(appUrl + "/first-counter/get-cache") + .then() .statusCode(204); }); - // check the client counter - when() - .get("/first-counter/get-client") - .then() - .body(is("10")); + String clientCounter = getCounterValue(appUrl + "/first-counter/get-client"); + assertEquals("10", clientCounter); } + /** + * Infinispan fail-over test. Testing a restart of the infinispan cluster and increment/change the cache counter value + * after the restart. The cache is deleted by the restart of infinispan cluster. Because of this, we need to fill the cache + * again. It is done by 'cache.put("counter", 0)'. Then it could be incremented. + * + * Cache newly created after the restart and incremented by 1 so it should be only 1. + * Client counter should remain the same during the restart and after the counter incrementing should by increased by 1. 
+ * + * @throws IOException + * @throws InterruptedException + */ @Test @Order(6) public void testIncrementAfterRestartInfinispanCluster() throws IOException, InterruptedException { - resetCounters(); - incrementCountersOnValue(10); + resetCacheCounter(appUrl + "/first-counter/reset-cache"); + resetClientCounter(appUrl + "/first-counter/reset-client"); + + incrementCountersOnValue(appUrl + "/first-counter/increment-counters", 10); killInfinispanCluster(); restartInfinispanCluster(); @@ -233,96 +174,150 @@ public void testIncrementAfterRestartInfinispanCluster() throws IOException, Int // try to connect back to infinispan cluster and expect no content await().atMost(5, TimeUnit.MINUTES).untilAsserted(() -> { when() - .get("/first-counter/get-cache") + .get(appUrl + "/first-counter/get-cache") .then() .statusCode(204); }); // create the deleted cache counter again - when() - .put("/first-counter/reset-cache") - .then() - .statusCode(200) - .body(is("Cache=0")); - + String zeroCache = fillTheCache(appUrl + "/first-counter/reset-cache"); // try to increment counters - when() - .put("/first-counter/increment-counters") - .then() - .statusCode(200) - .body(is("Cache=1 Client=11")); + String firstEndpointCounters = fillTheCache(appUrl + "/first-counter/increment-counters"); + + assertEquals("Cache=0", zeroCache); + assertEquals("Cache=1 Client=11", firstEndpointCounters); } + /** + * Infinispan fail-over test. Test invoke a request on the Infinispan server which is currently down. + * Because of our settings in the hotrod-client.properties file, the application is trying to connect only once and only 1s. + * By default, the app is trying to connect 60 s with 10 retries even when the next tests continue. It means that the counter + * could be unexpectedly increased in one of the next tests + * + * Cache should be empty (status code 204). + * Client counter should be increased even if the server is down. 
+ * + * @throws IOException + * @throws InterruptedException + */ @Test @Order(7) public void testInvokeOnFailedInfinispanCluster() throws IOException, InterruptedException { - resetCounters(); - incrementCountersOnValue(10); + resetCacheCounter(appUrl + "/first-counter/reset-cache"); + resetClientCounter(appUrl + "/first-counter/reset-client"); + + incrementCountersOnValue(appUrl + "/first-counter/increment-counters", 10); killInfinispanCluster(); // try to increment counters when() - .put("/first-counter/increment-counters") + .put(appUrl + "/first-counter/increment-counters") .then() - .statusCode(Matchers.allOf(Matchers.greaterThanOrEqualTo(500),Matchers.lessThan(600))); + .statusCode(Matchers.allOf(Matchers.greaterThanOrEqualTo(500), Matchers.lessThan(600))); restartInfinispanCluster(); // try to connect back to infinispan cluster and expect no content await().atMost(5, TimeUnit.MINUTES).untilAsserted(() -> { when() - .get("/first-counter/get-cache") + .get(appUrl + "/first-counter/get-cache") .then() .statusCode(204); }); // check the client counter - when() - .get("/first-counter/get-client") - .then() - .body(is("11")); + String clientCounter = getCounterValue(appUrl + "/first-counter/get-client"); + assertEquals("11", clientCounter); } - private void resetCounters() { - when() - .put("/first-counter/reset-cache") - .then() - .body(is("Cache=0")); + /** + * Check the connection to the second client (second Quarkus application). 
+ * + * @throws OpenShiftTestException + */ + @Test + @Order(8) + public void testConnectSecondClient() throws OpenShiftTestException { + resetCacheCounter(appUrl + "/first-counter/reset-cache"); - when() - .put("/first-counter/reset-client") - .then() - .body(is("Client=0")); + String secondClientCache = getCounterValue(openshift.getUrlFromRoute(SECOND_CLIENT_APPLICATION_NAME) + "/first-counter/get-cache"); + assertEquals("0", secondClientCache); } - private void incrementCountersOnValue(int count) { - for (int i = 1; i <= count; i++) { - when() - .put("/first-counter/increment-counters") - .then() - .body(is("Cache=" + i + " Client=" + i)); - } + /** + * Testing the cache is shared between clients (apps). Every client has its own client counter. + * + * Clients counters should be increased only if the increase is called by their client. + * Cache counter is shared and should contain the sum of both client counters. + * + * @throws OpenShiftTestException + */ + @Test + @Order(9) + public void testMultipleClientIncrement() throws OpenShiftTestException { + // reset the first and client + resetCacheCounter(appUrl + "/first-counter/reset-cache"); + resetClientCounter(appUrl + "/first-counter/reset-client"); + resetClientCounter(openshift.getUrlFromRoute(SECOND_CLIENT_APPLICATION_NAME) + "/first-counter/reset-client"); + + // fill the cache in first and second client + incrementCountersOnValue(appUrl + "/first-counter/increment-counters", 10); + incrementCountersOnValue(openshift.getUrlFromRoute(SECOND_CLIENT_APPLICATION_NAME) + "/first-counter/increment-counters", 10); + + // save the cache counters in first and second client + String firstClientCacheCounter = getCounterValue(appUrl + "/first-counter/get-cache"); + String secondClientCacheCounter = getCounterValue(openshift.getUrlFromRoute(SECOND_CLIENT_APPLICATION_NAME) + "/first-counter/get-cache"); + + // save the client counters in first and second client + String firstClientAppCounter = getCounterValue(appUrl + 
"/first-counter/get-client");
+ String secondClientAppCounter = getCounterValue(openshift.getUrlFromRoute(SECOND_CLIENT_APPLICATION_NAME) + "/first-counter/get-client");
+
+ assertEquals("10", firstClientAppCounter);
+ assertEquals("10", secondClientAppCounter);
+
+ // sum of both client counters
+ String cacheValue = String.valueOf(Integer.valueOf(firstClientAppCounter) + Integer.valueOf(secondClientAppCounter));
+ assertEquals(cacheValue, firstClientCacheCounter);
+ assertEquals(cacheValue, secondClientCacheCounter);
}

- private void killInfinispanCluster() throws IOException, InterruptedException {
- adjustYml(CLUSTER_CONFIG_PATH, "replicas: 1", "replicas: 0");
- new Command("oc", "apply", "-f", CLUSTER_CONFIG_PATH).runAndWait();
- new Command("oc", "-n", CLUSTER_NAMESPACE_NAME, "wait", "--for", "condition=gracefulShutdown", "--timeout=300s", "infinispan/" + NEW_CLUSTER_NAME).runAndWait();
- }
+ /**
+ * Multiple client Infinispan fail-over test. Tests restarting the Infinispan cluster and incrementing/changing the counter values
+ * of both client applications after the restart.
+ *
+ * The cache is newly created after the restart and incremented by 1 by each client, so it should be on value 2.
+ * Client counters should remain the same during the restart, and after the counters are incremented both are increased by 1. 
+ * + * @throws IOException + * @throws InterruptedException + * @throws OpenShiftTestException + */ + @Test + @Order(10) + public void testMultipleClientDataAfterRestartInfinispanCluster() throws IOException, InterruptedException, OpenShiftTestException { + resetCacheCounter(appUrl + "/first-counter/reset-cache"); + resetClientCounter(appUrl + "/first-counter/reset-client"); + resetClientCounter(openshift.getUrlFromRoute(SECOND_CLIENT_APPLICATION_NAME) + "/first-counter/reset-client"); - private void restartInfinispanCluster() throws IOException, InterruptedException { - adjustYml(CLUSTER_CONFIG_PATH, "replicas: 0", "replicas: 1"); - new Command("oc", "apply", "-f", CLUSTER_CONFIG_PATH).runAndWait(); - new Command("oc", "-n", CLUSTER_NAMESPACE_NAME, "wait", "--for", "condition=wellFormed", "--timeout=300s", "infinispan/" + NEW_CLUSTER_NAME).runAndWait(); - } + // update the cache in both clients + String firstClientCounters = fillTheCache(appUrl + "/first-counter/increment-counters"); + String secondClientCounters = fillTheCache(openshift.getUrlFromRoute(SECOND_CLIENT_APPLICATION_NAME) + "/first-counter/increment-counters"); + + assertEquals("Cache=1 Client=1", firstClientCounters); + assertEquals("Cache=2 Client=1", secondClientCounters); + + killInfinispanCluster(); + restartInfinispanCluster(); + + // create the deleted cache counter again + resetCacheCounter(appUrl + "/first-counter/reset-cache"); - private static void adjustYml(String path, String originString, String newString) throws IOException { - Path yamlPath = Paths.get(path); - Charset charset = StandardCharsets.UTF_8; + // increment counters by the first and second client + firstClientCounters = fillTheCache(appUrl + "/first-counter/increment-counters"); + secondClientCounters = fillTheCache(openshift.getUrlFromRoute(SECOND_CLIENT_APPLICATION_NAME) + "/first-counter/increment-counters"); - String yamlContent = new String(Files.readAllBytes(yamlPath), charset); - yamlContent = 
yamlContent.replace(originString, newString); - Files.write(yamlPath, yamlContent.getBytes(charset)); + assertEquals("Cache=1 Client=2", firstClientCounters); + assertEquals("Cache=2 Client=2", secondClientCounters); } } diff --git a/infinispan-client/src/test/resources/infinispan_cluster_config.yaml b/infinispan-client/src/test/resources/infinispan_cluster_config.yaml index 20751f10..7a3e6b82 100644 --- a/infinispan-client/src/test/resources/infinispan_cluster_config.yaml +++ b/infinispan-client/src/test/resources/infinispan_cluster_config.yaml @@ -8,7 +8,9 @@ spec: service: type: Cache container: - memory: 1Gi + extraJvmOpts: "-XX:NativeMemoryTracking=summary" + cpu: "2000m" + memory: 2Gi security: endpointSecretName: connect-secret endpointEncryption: diff --git a/pom.xml b/pom.xml index 90762582..8248bbc9 100644 --- a/pom.xml +++ b/pom.xml @@ -78,7 +78,7 @@ 3.8.1 3.2.0 2.22.2 - 1.11.5.Final + 1.11.6.Final ${version.quarkus} 1.15.2