From 2cf1d5acaea50f0d1454e0125d7459c8d96f1b59 Mon Sep 17 00:00:00 2001 From: Marko Strukelj Date: Wed, 28 Jun 2023 19:40:47 +0200 Subject: [PATCH] Fix KeycloakRBACAuthorizer to work with `StandardAuthorizer` in KRAFT mode (#188) * Bump kafka-clients version to 3.4.0 Signed-off-by: Marko Strukelj * Update testsuite/kafka image version Signed-off-by: Marko Strukelj * Fix KeycloakRBACAuthorizer to work with `StandardAuthorizer` when in KRAFT mode Signed-off-by: Marko Strukelj * Introduce a new authorizer class KeycloakAuthorizer + Add authorization tests for Kraft mode Signed-off-by: Marko Strukelj * Documentation and code cleanup Signed-off-by: Marko Strukelj * More refactoring, code fixes and tests Signed-off-by: Marko Strukelj * Added more test and examples + fixed pools shutdown Signed-off-by: Marko Strukelj * Update README.md Signed-off-by: Marko Strukelj * Javadoc Signed-off-by: Marko Strukelj * Apply suggestions from code review Co-authored-by: Tom Bentley Co-authored-by: Jakub Scholz Signed-off-by: Marko Strukelj * Code comments, logger usage and code readability Signed-off-by: Marko Strukelj * More fixes and explanations Signed-off-by: Marko Strukelj * More javadoc and threading logic explanations Signed-off-by: Marko Strukelj * Fix JVM specific test failure due to different JVM gc() dynamics Signed-off-by: Marko Strukelj * Fix Spring example Signed-off-by: Marko Strukelj * Fix Spring example Signed-off-by: Marko Strukelj * Fix Spring example (debug Travis issue) Signed-off-by: Marko Strukelj * Fix Spring example (debug Travis issue) Signed-off-by: Marko Strukelj * Fix Spring example (debug Travis issue) Signed-off-by: Marko Strukelj * Increase containers startup timeout to accommodate Travis Signed-off-by: Marko Strukelj * Mark KeycloakRBACAuthorizer deprecated, log warning when configured as authorizer Signed-off-by: Marko Strukelj --------- Signed-off-by: Marko Strukelj Co-authored-by: Tom Bentley Co-authored-by: Jakub Scholz --- .travis/build.sh | 17 +- README.md | 134 +-- .../compose-authz-kraft.yml | 94 ++ .../kafka-oauth-strimzi/compose-authz.yml | 2 +- .../kafka-oauth-strimzi/compose-kraft.yml | 81 ++ .../kafka/config/log4j.properties | 6 +- .../kafka/simple_kafka_config.sh | 33 +- .../docker/kafka-oauth-strimzi/kafka/start.sh | 7 +- examples/docker/spring/Dockerfile | 2 +- examples/docker/spring/pom.xml | 5 + examples/docker/spring/test-spring.sh | 18 + .../oauth/common/BearerTokenWithPayload.java | 15 +- .../io/strimzi/kafka/oauth/common/Config.java | 40 +- .../kafka/oauth/common/ConfigUtil.java | 27 +- .../strimzi/kafka/oauth/common/JSONUtil.java | 41 + .../strimzi/kafka/oauth/common/TokenInfo.java | 64 +- .../kafka/oauth/services/Sessions.java | 52 +- oauth-keycloak-authorizer/pom.xml | 10 + .../oauth/server/authorizer/AuthzConfig.java | 50 +- .../server/authorizer/Configuration.java | 494 +++++++++ .../server/authorizer/GrantsHandler.java | 579 +++++++++++ .../authorizer/JwtKafkaPrincipalBuilder.java | 12 +- .../server/authorizer/KeycloakAuthorizer.java | 247 +++++ .../authorizer/KeycloakAuthorizerService.java | 36 + .../authorizer/KeycloakRBACAuthorizer.java | 707 ++++--------- .../oauth/server/authorizer/Semaphores.java | 56 + .../oauth/server/authorizer/UserSpec.java | 15 + .../OAuthKafkaPrincipalBuilderTest.java | 3 +- .../server/BearerTokenWithJsonPayload.java | 171 ++++ ...asServerOauthValidatorCallbackHandler.java | 85 +- .../oauth/server/OAuthKafkaPrincipal.java | 28 +- .../server/OAuthKafkaPrincipalBuilder.java | 57 +- .../server/MockBearerTokenWithPayload.java 
| 11 +- .../oauth/server/OAuthKafkaPrincipalTest.java | 44 +- .../server/OAuthSessionAuthorizerTest.java | 39 +- pom.xml | 9 +- .../oauth/common/ContainerLogLineReader.java | 51 + .../oauth/common/TestContainersWatcher.java | 13 +- .../testsuite/oauth/common/TestMetrics.java | 6 +- .../testsuite/oauth/common/TestUtil.java | 44 +- testsuite/docker/kafka/Dockerfile | 2 +- .../docker/kafka/config/log4j.properties | 1 + testsuite/docker/kafka/pom.xml | 2 +- .../kafka/scripts/simple_kafka_config.sh | 33 +- testsuite/docker/kafka/scripts/start.sh | 35 +- .../docker/kafka/scripts/start_no_wait.sh | 3 + .../docker/kafka/scripts/start_with_hydra.sh | 4 + .../keycloak/realms/kafka-authz-realm.json | 18 + .../testsuite/oauth/auth/BasicTests.java | 202 ++-- .../docker-compose.yml | 153 +++ testsuite/keycloak-authz-kraft-tests/pom.xml | 63 ++ .../KeycloakKRaftAuthorizationTests.java | 120 +++ .../test/resources/simplelogger.properties | 2 + testsuite/keycloak-authz-tests/pom.xml | 4 + .../testsuite/oauth/authz/BasicTest.java | 291 ++++++ .../strimzi/testsuite/oauth/authz/Common.java | 82 +- .../oauth/authz/ConfigurationTest.java | 2 +- .../testsuite/oauth/authz/FloodTest.java | 66 +- .../testsuite/oauth/authz/MetricsTest.java | 30 +- .../testsuite/oauth/authz/MultiSaslTest.java | 122 +-- .../oauth/authz/OAuthOverPlainTest.java | 4 +- .../testsuite/oauth/authz/RefreshTest.java | 9 +- .../testsuite/oauth/authz/ScramTest.java | 81 ++ .../testsuite/oauth/authz/SingletonTest.java | 39 + .../testsuite/oauth/authz/BasicTest.java | 219 ---- .../authz/KeycloakAuthorizationTests.java | 54 +- .../docker-compose.yml | 158 +++ testsuite/keycloak-authz-zk-tests/pom.xml | 63 ++ .../kraft/KeycloakZKAuthorizationTests.java | 122 +++ .../test/resources/simplelogger.properties | 2 + .../keycloak-errors-tests/docker-compose.yml | 2 +- .../server/AdminServerRequestHandler.java | 56 +- .../server/AuthServerRequestHandler.java | 114 ++- .../testsuite/oauth/server/Endpoint.java | 1 + .../server/MockOAuthServerMainVerticle.java | 22 +- .../strimzi/testsuite/oauth/server/Mode.java | 1 + .../testsuite/oauth/server/UserInfo.java | 15 + .../kafka/oauth/server/TestTokenFactory.java | 14 + .../server/authorizer/TestAuthzUtil.java | 12 + .../testsuite/oauth/MockOAuthTests.java | 17 +- .../testsuite/oauth/mockoauth/Common.java | 100 +- .../oauth/mockoauth/ConnectTimeoutTests.java | 3 +- .../mockoauth/KeycloakAuthorizerTest.java | 964 +++++++++++++++++- .../oauth/mockoauth/LogLineReader.java | 30 + .../test/resources/simplelogger.properties | 6 +- testsuite/pom.xml | 23 +- 86 files changed, 5214 insertions(+), 1557 deletions(-) create mode 100644 examples/docker/kafka-oauth-strimzi/compose-authz-kraft.yml create mode 100644 examples/docker/kafka-oauth-strimzi/compose-kraft.yml create mode 100755 examples/docker/spring/test-spring.sh create mode 100644 oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/Configuration.java create mode 100644 oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/GrantsHandler.java create mode 100644 oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakAuthorizer.java create mode 100644 oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakAuthorizerService.java create mode 100644 oauth-server/src/main/java/io/strimzi/kafka/oauth/server/BearerTokenWithJsonPayload.java create mode 100644 testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/ContainerLogLineReader.java 
create mode 100644 testsuite/keycloak-authz-kraft-tests/docker-compose.yml create mode 100644 testsuite/keycloak-authz-kraft-tests/pom.xml create mode 100644 testsuite/keycloak-authz-kraft-tests/src/test/java/io/strimzi/testsuite/oauth/authz/kraft/KeycloakKRaftAuthorizationTests.java create mode 100644 testsuite/keycloak-authz-kraft-tests/src/test/resources/simplelogger.properties create mode 100644 testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/BasicTest.java rename testsuite/keycloak-authz-tests/src/{test => main}/java/io/strimzi/testsuite/oauth/authz/Common.java (79%) rename testsuite/keycloak-authz-tests/src/{test => main}/java/io/strimzi/testsuite/oauth/authz/ConfigurationTest.java (97%) rename testsuite/keycloak-authz-tests/src/{test => main}/java/io/strimzi/testsuite/oauth/authz/FloodTest.java (87%) rename testsuite/keycloak-authz-tests/src/{test => main}/java/io/strimzi/testsuite/oauth/authz/MetricsTest.java (63%) rename testsuite/keycloak-authz-tests/src/{test => main}/java/io/strimzi/testsuite/oauth/authz/MultiSaslTest.java (58%) rename testsuite/keycloak-authz-tests/src/{test => main}/java/io/strimzi/testsuite/oauth/authz/OAuthOverPlainTest.java (60%) rename testsuite/keycloak-authz-tests/src/{test => main}/java/io/strimzi/testsuite/oauth/authz/RefreshTest.java (98%) create mode 100644 testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/ScramTest.java create mode 100644 testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/SingletonTest.java delete mode 100644 testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/BasicTest.java create mode 100644 testsuite/keycloak-authz-zk-tests/docker-compose.yml create mode 100644 testsuite/keycloak-authz-zk-tests/pom.xml create mode 100644 testsuite/keycloak-authz-zk-tests/src/test/java/io/strimzi/testsuite/oauth/authz/kraft/KeycloakZKAuthorizationTests.java create mode 100644 testsuite/keycloak-authz-zk-tests/src/test/resources/simplelogger.properties create mode 100644 testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/UserInfo.java create mode 100644 testsuite/mockoauth-tests/src/test/java/io/strimzi/kafka/oauth/server/TestTokenFactory.java create mode 100644 testsuite/mockoauth-tests/src/test/java/io/strimzi/kafka/oauth/server/authorizer/TestAuthzUtil.java create mode 100644 testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/LogLineReader.java diff --git a/.travis/build.sh b/.travis/build.sh index a286ae98..2448534a 100755 --- a/.travis/build.sh +++ b/.travis/build.sh @@ -33,6 +33,13 @@ mvn spotbugs:check # Also test examples build on different architectures (exclude ppc64le until fixed) if [ "$arch" != 'ppc64le' ]; then mvn clean install -f examples/docker + cd examples/docker + set +e + ./spring/test-spring.sh + EXIT=$? + cd ../.. + exitIfError + set -e fi # Run testsuite @@ -68,24 +75,24 @@ elif [[ "$arch" != 'ppc64le' ]]; then exitIfError clearDockerEnv - mvn -e -V -B clean install -f testsuite -Pkafka-3_2_3 + mvn -e -V -B clean install -f testsuite -Pkafka-3_2_3 -DfailIfNoTests=false -Dtest=\!KeycloakKRaftAuthorizationTests EXIT=$? 
exitIfError # Excluded by default to not exceed Travis job timeout - if [ "SKIP_DISABLED" == "false" ]; then + if [ "$SKIP_DISABLED" == "false" ]; then clearDockerEnv - mvn -e -V -B clean install -f testsuite -Pkafka-3_1_2 + mvn -e -V -B clean install -f testsuite -Pkafka-3_1_2 -DfailIfNoTests=false -Dtest=\!KeycloakKRaftAuthorizationTests,\!KeycloakZKAuthorizationTests EXIT=$? exitIfError clearDockerEnv - mvn -e -V -B clean install -f testsuite -Pkafka-3_0_0 + mvn -e -V -B clean install -f testsuite -Pkafka-3_0_0 -DfailIfNoTests=false -Dtest=\!KeycloakKRaftAuthorizationTests,\!KeycloakZKAuthorizationTests EXIT=$? exitIfError clearDockerEnv - mvn -e -V -B clean install -f testsuite -Pkafka-2_8_1 + mvn -e -V -B clean install -f testsuite -Pkafka-2_8_1 -DfailIfNoTests=false -Dtest=\!KeycloakKRaftAuthorizationTests,\!KeycloakZKAuthorizationTests EXIT=$? exitIfError fi diff --git a/README.md b/README.md index 09b4837e..7dfa947f 100644 --- a/README.md +++ b/README.md @@ -35,8 +35,8 @@ Strimzi Kafka OAuth modules provide support for OAuth2 as authentication mechani - [Enabling the re-authentication](#enabling-the-re-authentication) - [Enforcing the session timeout](#enforcing-the-session-timeout) - [Configuring the Kafka Broker authorization](#configuring-the-kafka-broker-authorization) - - [Enabling the KeycloakRBACAuthorizer](#enabling-the-keycloakrbacauthorizer) - - [Configuring the KeycloakRBACAuthorizer](#configuring-the-keycloakrbacauthorizer) + - [Enabling the KeycloakAuthorizer](#enabling-the-keycloakauthorizer) + - [Configuring the KeycloakAuthorizer](#configuring-the-keycloakauthorizer) - [Configuring the RBAC rules through Keycloak Authorization Services](#configuring-the-rbac-rules-through-keycloak-authorization-services) - [Configuring the Kafka client with SASL/OAUTHBEARER](#configuring-the-kafka-client-with-sasloauthbearer) - [Enabling SASL/OAUTHBEARER mechanism](#enabling-sasloauthbearer-mechanism) @@ -73,10 +73,10 @@ User authentication is then an outside step which user manually performs to obta In the simplest case, the client application can authenticate in its own name using client credentials - client id and secret. While the client secret is in this case packaged with application client, the benefit is still that it is not shared with application server (Kafka broker in our case) - the client first performs authentication against OAuth2 authorization server in exchange for an access token, which it then sends to the application server instead of its secret. -Access tokens can be independently tracked and revoked at will, and represent a limited access to resources on application server. +Access tokens can be independently tracked and revoked at will, and represent limited access to resources on application server. In a more advanced case, user authenticates and authorizes the application client in exchange for a token. -The client application is then packaged with only a long lived access token or a long lived refresh token. +The client application is then packaged with only a long-lived access token or a long-lived refresh token. User's username and password are never packaged with application client. If access token is configured it is sent directly to Kafka broker during session initialisation. But if refresh token is configured, it is first used to ask authorization server for a new access token, which is then sent to Kafka broker to start a new authenticated session. 
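To make these credential options concrete, here is a minimal client-side sketch using this library's `oauth.*` JAAS options (the endpoint URI, client ID, secret and token values are placeholders; the client also needs `sasl.login.callback.handler.class=io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler`, as shown in the examples in this repository):

    # Option 1: client credentials - the client authenticates in its own name
    sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \
      oauth.token.endpoint.uri="https://server/token-endpoint" \
      oauth.client.id="my-client" \
      oauth.client.secret="my-client-secret" ;

    # Option 2: a long-lived refresh token obtained by a user, exchanged for an access token at session start
    sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \
      oauth.token.endpoint.uri="https://server/token-endpoint" \
      oauth.client.id="my-client" \
      oauth.refresh.token="<refresh-token>" ;

    # Option 3: a long-lived access token, sent to the broker as-is
    sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \
      oauth.access.token="<access-token>" ;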
@@ -100,7 +100,7 @@ Authorization in Kafka is implemented completely separately and independently of Thus, it is possible to configure Kafka brokers to use OAuth2 based authentication, and at the same time the default ACL authorization. Since version 0.3.0 Strimzi Kafka OAuth provides token-based authorization using Keycloak as authorization server, and taking advantage of [Keycloak Authorization Services](https://www.keycloak.org/docs/latest/authorization_services/). -See [examples authorization README](examples/README-authz.md) for a demonstration on how to install, and use [KeycloakRBACAuthorizer](oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakRBACAuthorizer.java) which implements this functionality. +See [examples authorization README](examples/README-authz.md) for a demonstration on how to install, and use [KeycloakAuthorizer](oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakAuthorizer.java) which implements this functionality. Building -------- @@ -110,7 +110,7 @@ Building Installing ---------- -Copy the following jars into your Kafka libs directory: +Copy the following jars into your Kafka `libs` directory: oauth-common/target/kafka-oauth-common-*.jar oauth-server/target/kafka-oauth-server-*.jar @@ -135,7 +135,7 @@ Also, you may want each developer to have a user account in order to configure u Configuring users, clients, and authorizing clients, obtaining access tokens, and refresh tokens are steps that are specific to the authorization server that you use. Consult your authorization server's documentation. -If you use the KeycloakRBACAuthorizer for authorization, then you have to use Keycloak or Keycloak based authorization server to configure security policies and permissions for users and service accounts. +If you use the `KeycloakAuthorizer` for authorization, then you have to use Keycloak or Keycloak based authorization server to configure security policies and permissions for users and service accounts. Configuring the Kafka Broker ---------------------------- @@ -145,7 +145,7 @@ Strimzi Kafka OAuth therefore needs to use JAAS configuration to activate SASL/O This is true for configuring the server side of the Kafka Broker, as well as for the Kafka client side - when using OAuth 2 for inter-broker communication. The authentication configuration specific to the Strimzi Kafka OAuth can be specified as part of JAAS configuration in the form of JAAS parameter values. -The authorization configuration for KeycloakRBACAuthorizer is specified as `server.properties` key-value pairs. +The authorization configuration for `KeycloakAuthorizer` is specified as `server.properties` key-value pairs. Both authentication and authorization configuration specific to Strimzi Kafka OAuth can also be set as ENV vars, or as Java system properties. The limitation here is that authentication configuration specified in this manner can not be listener-scoped. @@ -274,7 +274,7 @@ In terms of precedence the order of looking for a configuration is the following Whichever is found first is the one that is used. -Similarly for authorization the `strimzi.*` properties follow a similar lookup mechanism except that they don't use JAAS config, but are specified as `server.properties` keys. +Similarly, for authorization the `strimzi.*` properties follow a similar lookup mechanism except that they don't use JAAS config, but are specified as `server.properties` keys. 
For example, the order of looking for configuration for `strimzi.client.id` would be the following: - System property `strimzi.client.id` - ENV var `STRIMZI_CLIENT_ID` @@ -314,13 +314,13 @@ If the configured `oauth.client.id` is `kafka`, the following are valid examples - "kafka" - \["rest-api", "kafka"\] -JWT tokens contain unique user identification in `sub` claim. However, this is often a long number or a UUID, but we usually prefer to use human readable usernames, which may also be present in JWT token. +JWT tokens contain unique user identification in `sub` claim. However, this is often a long number or a UUID, but we usually prefer to use human-readable usernames, which may also be present in JWT token. Use `oauth.username.claim` to map the claim (attribute) where the value you want to use as user id is stored: - `oauth.username.claim` (e.g.: "preferred_username") If `oauth.username.claim` is specified the value of that claim is used instead, but if not set, the automatic fallback claim is the `sub` claim. -You can specify the secondary claim to fallback to, which allows you to map multiple account types into the same principal namespace: +You can specify the secondary claim to fall back to, which allows you to map multiple account types into the same principal namespace: - `oauth.fallback.username.claim` (e.g.: "client_id") - `oauth.fallback.username.prefix` (e.g.: "client-account-") @@ -355,7 +355,7 @@ You can control the minimum pause between two consecutive scheduled keys refresh All access tokens can be invalidated by rotating the keys on authorization server and expiring old keys. -Some authorization servers don't specify the `"use": "sig"` attribute in validation keys in the JWKS endpoint response. By default only the public keys with `"use": "sig"` are considered for signature validation. There is an option to ignore the `use` attribute, and consider all the keys for token signature validation: +Some authorization servers don't specify the `"use": "sig"` attribute in validation keys in the JWKS endpoint response. By default, only the public keys with `"use": "sig"` are considered for signature validation. There is an option to ignore the `use` attribute, and consider all the keys for token signature validation: - `oauth.jwks.ignore.key.use` (e.g.: "true" - ignore the `use` attribute on the keys in JWKS response) During the Kafka broker startup, a request to the JWKS endpoint immediately tries to load the keys. @@ -402,7 +402,7 @@ If the information is available we attempt to extract the user id from Introspec Use `oauth.username.claim` to map the attribute where the user id is stored: - `oauth.username.claim` (e.g.: "preferred_username") -You can fallback to a secondary attribute, which allows you to map multiple account types into the same user id namespace: +You can fall back to a secondary attribute, which allows you to map multiple account types into the same user id namespace: - `oauth.fallback.username.claim` (e.g.: "client_id") - `oauth.fallback.username.prefix` (e.g.: "client-account-") @@ -551,7 +551,7 @@ Then, you need to configure the `sasl.jaas.config` with client configuration opt All the Kafka brokers in the cluster should be configured with the same client ID and secret, and the corresponding user should be added to `super.users` since inter-broker client requires super-user permissions. 
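For illustration, a broker-side sketch of such an inter-broker setup in `server.properties` might look as follows (the listener name `REPLICATION`, the realm, and the credentials are placeholders; the callback handler classes are the ones this library provides):

    sasl.mechanism.inter.broker.protocol=OAUTHBEARER
    inter.broker.listener.name=REPLICATION
    listener.name.replication.oauthbearer.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \
      oauth.token.endpoint.uri="https://keycloak/auth/realms/demo/protocol/openid-connect/token" \
      oauth.client.id="kafka" \
      oauth.client.secret="kafka-secret" ;
    listener.name.replication.oauthbearer.sasl.login.callback.handler.class=io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler
    listener.name.replication.oauthbearer.sasl.server.callback.handler.class=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler
    super.users=User:service-account-kafka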
-When you configure your listener to support OAuth, you can configure it to support OAUTHBEARER, but you can also configure it to support the OAuth over PLAIN as explained previously. PLAIN does not make much sense on the broker for inter-broker communication since OAUTHBEARER is supported. Therefore it is best to only use OAUTHBEARER mechanism for inter-broker communication. +When you configure your listener to support OAuth, you can configure it to support OAUTHBEARER, but you can also configure it to support the OAuth over PLAIN as explained previously. PLAIN does not make much sense on the broker for inter-broker communication since OAUTHBEARER is supported. Therefore, it is best to only use OAUTHBEARER mechanism for inter-broker communication. Specify the following `oauth.*` properties in `sasl.jaas.config` configuration: - `oauth.token.endpoint.uri` (e.g.: "https://localhost:8443/auth/realms/demo/protocol/openid-connect/token") @@ -647,7 +647,7 @@ The option can be specified per-listener. For example if you have a listener cal If re-authentication is enabled, the session timeout is enforced as the expiry time of the access token. By using re-authentication the multiple 'lightweight' sessions can follow one another over the same network connection for as long as the connection isn't closed or interrupted due to processes restarting or due to network issues. -If for some reason you can't enable re-authentication or don't want to use it, and if you want to invalidate the session when access token expires, but aren't using `KeycloakRBACAuthorizer`, which does this automatically (since version 0.6.0 of this library), you can use the `OAuthSessionAuthorizer` to enforce token expiry mid-session. +If for some reason you can't enable re-authentication or don't want to use it, and if you want to invalidate the session when access token expires, but aren't using `KeycloakAuthorizer`, which does this automatically (since version 0.6.0 of this library), you can use the `OAuthSessionAuthorizer` to enforce token expiry mid-session. `OAuthSessionAuthorizer` works by checking the access token expiry on every operation performed, and denies all access after the token has expired. As long as the token has not yet expired (it may have been recently invalidated at authorization server but the Kafka broker may not yet know about it) the authorization is delegated to the delegate authorizer. @@ -661,17 +661,17 @@ You configure the `SimpleAclAuthorizer` by specifying the same properties as if It's the same for any other authorizer you may use - instead of using `authorizer.class.name` you install it by using `strimzi.authorizer.delegate.class.name`. -Do not use `OAuthSessionAuthorizer` together with `KeycloakRBACAuthorizer` as that would be redundant. +Do not use `OAuthSessionAuthorizer` together with `KeycloakAuthorizer` as that would be redundant. If you don't use any authorizer at all, and don't use re-authentication, but still want to enforce access token expiry mid-session, don't specify the `strimzi.authorizer.delegate.class.name` at all. Instead, specify the following configuration: strimzi.authorizer.grant.when.no.delegate=true -In this case, unless the access token has expired, all the actions will be granted. The broker will behave as if no authorizer was installed, effectively turning every user into a 'super user'. +In this case, unless the access token has expired, all the actions will be granted. 
The broker will behave as if no authorizer was installed, effectively turning every user into a 'superuser'.

The unauthenticated users, or users authenticated with a mechanism other than OAuth will also automatically have all the actions granted.

-Note: When using SASL/PLAIN authentication in combination with `KeycloakRBACAuthorizer` or `OAuthSessionAuthorizer` the Kafka client session will expire when the access token expires.
+Note: When using SASL/PLAIN authentication in combination with `KeycloakAuthorizer` or `OAuthSessionAuthorizer` the Kafka client session will expire when the access token expires.
This will result in sudden appearance of the authorization failures.
Since there is no way to pass a new access token mid-session (re-authenticate), the client will have to start a new session by establishing a new connection.

@@ -681,30 +681,32 @@ Strimzi Kafka OAuth provides support to centrally manage not only users and clie
Support for this works specifically with Keycloak Authorization Services.

-By default, authorization is not enabled on Kafka Broker. There is `kafka.security.auth.SimpleAclAuthorizer` that comes with Kafka out-of-the-box, and is well documented in [Kafka Documentation](https://kafka.apache.org/documentation/).
+By default, authorization is not enabled on Kafka Broker. There is `kafka.security.authorizer.AclAuthorizer` that comes with Kafka out-of-the-box and works with Zookeeper, and `org.apache.kafka.metadata.authorizer.StandardAuthorizer` that works in KRaft mode.
+They behave the same and handle the standard Kafka ACL-based permissions as documented in [Kafka Documentation](https://kafka.apache.org/documentation/).

-Strimzi Kafka OAuth provides an alternative authorizer - `io.strimzi.kafka.oauth.server.authorizer.KeycloakRBACAuthorizer`.
-`KeycloakRBACAuthorizer` uses the access token and the Token Endpoint of the same Keycloak realm used for OAuth2 authentication as a source of permission grants for the authenticated session.
+Strimzi Kafka OAuth provides an alternative authorizer - `io.strimzi.kafka.oauth.server.authorizer.KeycloakAuthorizer`.
+`KeycloakAuthorizer` uses the access token and the Token Endpoint of the same Keycloak realm used for OAuth2 authentication as a source of permission grants for the authenticated session.

-#### Enabling the KeycloakRBACAuthorizer
+Note: In Kafka versions prior to 3.3.x the `io.strimzi.kafka.oauth.server.authorizer.KeycloakRBACAuthorizer` class should be used as the authorizer. For the latest Kafka versions, use `KeycloakAuthorizer`, which supports both KRaft mode and Zookeeper mode.
+The `KeycloakAuthorizer` detects the runtime environment, and delegates to `AclAuthorizer` when in Zookeeper mode, and to `StandardAuthorizer` when in KRaft mode (detected based on the presence of the `process.roles` config property).

-Add the following to `server.properties` file:
+#### Enabling the KeycloakAuthorizer

-    authorizer.class.name=io.strimzi.kafka.oauth.server.authorizer.KeycloakRBACAuthorizer
+Add the following to `server.properties` file:

-Note: Since version 0.6.0 the `io.strimzi.kafka.oauth.server.authorizer.JwtKafkaPrincipalBuilder` has been deprecated. Use the above configuration instead.
+    authorizer.class.name=io.strimzi.kafka.oauth.server.authorizer.KeycloakAuthorizer

You also need a properly configured OAuth authentication support, as described in [Configuring the Kafka broker authentication](#configuring-the-kafka-broker-authentication).
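Putting the pieces together, a minimal `server.properties` sketch for enabling the authorizer could look as follows (the token endpoint URI and client ID are placeholders; the two `strimzi.authorization.*` options are described in the next section, and the principal builder is configured the same way as in the examples in this repository):

    authorizer.class.name=io.strimzi.kafka.oauth.server.authorizer.KeycloakAuthorizer
    principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder
    strimzi.authorization.token.endpoint.uri=https://keycloak/auth/realms/demo/protocol/openid-connect/token
    strimzi.authorization.client.id=kafka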
-#### Configuring the KeycloakRBACAuthorizer +#### Configuring the KeycloakAuthorizer -All the configuration properties for KeycloakRBACAuthorizer begin with a `strimzi.authorization.` prefix. +All the configuration properties for KeycloakAuthorizer begin with a `strimzi.authorization.` prefix. -The token endpoint used by KeycloakRBACAuthorizer has to be the same as the one used for OAuth authentication: +The token endpoint used by KeycloakAuthorizer has to be the same as the one used for OAuth authentication: - `strimzi.authorization.token.endpoint.uri` (e.g.: "https://localhost:8443/auth/realms/demo/protocol/openid-connect/token" - the endpoint used to exchange the access token for a list of grants) - `strimzi.authorization.client.id` (e.g.: "kafka" - the client representing a Kafka Broker which has Authorization Services enabled) -The authorizer will regularly reload the list of grants for active sessions. By default it will do this once every minute. +The authorizer will regularly reload the list of grants for active sessions. By default, it will do this once every minute. You can change this period or turn it off for debugging reasons (by setting it to "0"): - `strimzi.authorization.grants.refresh.period.seconds` (e.g.: "120" - the refresh job period in seconds) @@ -726,18 +728,36 @@ A single client typically uses a single unique access token for the concurrent s As a result, the number of active tokens on the broker is generally less than the number of active sessions (connections). However, keep in mind that this is replicated across all Kafka brokers in the cluster, as clients maintain active sessions to multiple brokers. -New sessions will, by default, request the latest grants from the Keycloak in order for any changes in permissions to be reflected immediately. -You can change this, and reuse the grants for the token, if they have previously been fetched due to the same token already having been used -for another session on the broker. This can noticeably reduce the load from brokers to the Keycloak and can also help alleviate 'glitchiness' issues -addressed by `strimzi.authorization.http.retries`. However, as a result, the grants initially used for the new session may be out-of-sync with -Keycloak for up to `strimzi.authorization.grants.refresh.period.seconds`. -- `strimzi.authorization.reuse.grants` (e.g.: "true" - if enabled, then grants fetched for another session may be used) - -You may also want to configure some other things. You may want to set a logical cluster name so you can target it with authorization rules: +Keycloak Authorization Services requires an access token to provide the grants for the user session. In the context of Kafka authorization the permissions are tied to a specific user id / principal name. +Multiple sessions and multiple access tokens for the same user will receive the same set of grants. For that reason the grants are cached in Keycloak authorizer 'per user', rather than 'per access token'. +The authorizer by default checks if the grants for the current user are available in grants cache. If grants are available, the existing grants are used. If not, they are fetched from Keycloak using the current session's access token and the current thread. +Once the grants are cached they will stay in the cache for the duration of the session or until the access token used to fetch them expires or idles out due to inactivity. There is a background job that periodically refreshes the cached grants (see: `strimzi.authorization.grants.refresh.period.seconds`). 
+The consequence of such behavior is that the grants used for a new session may be out of sync with the state on the Keycloak server for up to the configured grants refresh period.
+Sometimes (e.g., for debugging purposes, or so that any changes in permissions are reflected immediately) you may want to fetch fresh grants for each newly established session, rather than use the already cached ones.
+Note that this can noticeably increase the load from brokers to Keycloak and aggravate any 'glitchiness' issues in communication with Keycloak.
+To enable such behavior, set the following option to `false`:
+- `strimzi.authorization.reuse.grants` (e.g.: "false" - if set to false, then when a new session is established the grants will be fetched from Keycloak using that session's access token and cached to the grants cache)
+
+**Note**
+This option used to be set to `false` by default in version 0.12.0.
+In versions prior to 0.13.0 the grants were cached per access token, rather than per user id / principal name.
+
+The grants in the grants cache are shared between sessions of the same user id. To facilitate their timely removal from the cache, the maximum time in seconds that a grant is kept in the grants cache without being accessed can be configured.
+It allows for reliable, active releasing of memory rather than waiting for the VM's gc() to kick in for the timed-out sessions. Normally, open sessions should not just idly consume resources; rather, they should perform some operations.
+The default value for the maximum idle time of cached grants is 300 seconds. After that the grant is removed from the cache. If it is needed again, it is reloaded from the Keycloak server.
+The following option can be used to set a custom value for the maximum idle time for a cached grant:
+- `strimzi.authorization.grants.max.idle.time.seconds` (e.g.: "600" - if authorization grants for a user are not accessed for more than ten minutes, remove them from the grants cache)
+
+There is a background service that removes the idle grants and the grants with expired access tokens from the grants cache by periodically iterating over the cache.
+The default time between two consecutive runs is 300 seconds.
+The following option can be used to set a custom value for the job period:
+- `strimzi.authorization.grants.gc.period.seconds` (e.g.: "600" - idle grants and grants with expired access tokens will be removed from the grants cache every ten minutes)
+
+There are some other things you may also want to configure. You may want to set a logical cluster name so you can target it with authorization rules:
- `strimzi.authorization.kafka.cluster.name` (e.g.: "dev-cluster" - a logical name of the cluster which can be targeted with authorization services resource definitions, and permission policies)

-You can integrate KeycloakRBACAuthorizer with AclAuthorizer or StandardAuthorizer (in KRaft mode):
-- `strimzi.authorization.delegate.to.kafka.acl` (e.g.: "true" - if enabled, then when action is not granted based on Keycloak Authorization Services grant it is delegated to SimpleACLAuthorizer which can still grant it.)
+You can integrate KeycloakAuthorizer with AclAuthorizer (in Zookeeper mode) or StandardAuthorizer (in KRaft mode):
+- `strimzi.authorization.delegate.to.kafka.acl` (e.g.: "true" - if enabled, then when an action is not granted based on a Keycloak Authorization Services grant, it is delegated to AclAuthorizer / StandardAuthorizer, which can still grant it.)
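Taken together, a sketch of the grants-related tuning in `server.properties` (the values are illustrative, not recommendations; the documented defaults are described above):

    strimzi.authorization.grants.refresh.period.seconds=60
    strimzi.authorization.reuse.grants=true
    strimzi.authorization.grants.max.idle.time.seconds=300
    strimzi.authorization.grants.gc.period.seconds=300
    strimzi.authorization.kafka.cluster.name=dev-cluster
    strimzi.authorization.delegate.to.kafka.acl=false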
If you turn on authorization support in Kafka brokers, you need to properly set `super.users` property.
By default, access token's `sub` claim is used as user id.

@@ -762,17 +782,17 @@ For a more in-depth guide to using Keycloak Authorization Services see [the tuto
In order to grant Kafka permissions to users or service accounts you have to use the Keycloak Authorization Services rules on the OAuth client that represents the Kafka Broker - typically this client has `kafka` as its client ID.
The rules exist within the scope of this client, which means that if you have different Kafka clusters configured with different OAuth client IDs they would each have a separate set of permissions even though using the same set of users, and client accounts.

-When the Kafka client authenticates using SASL/OAUTHEARER or SASL/PLAIN configured as 'OAuth over PLAIN' the KeycloakRBACAuthorizer retrieves the list of grants for the current session from the Keycloak server using the access token of the current session.
+When the Kafka client authenticates using SASL/OAUTHBEARER or SASL/PLAIN configured as 'OAuth over PLAIN' the KeycloakAuthorizer retrieves the list of grants for the current session from the Keycloak server using the access token of the current session.
This list of grants is the result of evaluating the Keycloak Authorization Services policies and permissions.

There are four concepts used to grant permissions: `resources`, `authorization scopes`, `policies`, and `permissions`.

##### Authorization scopes

-Typically the initial configuration involves uploading the authorization scopes which creates a list of all the possible actions that can be performed on all the types of a Kafka resources.
+Typically, the initial configuration involves uploading the authorization scopes, which creates a list of all the possible actions that can be performed on all the types of Kafka resources.
This step is performed only once, before defining any permissions. Alternatively, the authorization scopes can be added manually, but make sure to not introduce typos.

-The following authorization scopes can be used, mirroring the Kafka security model: `Create`, `Write`, `Read`, `Delete`, `Describe`, `Alter`, `DescribeConfig`, `AlterConfig`, `ClusterAction`.
+The following authorization scopes can be used, mirroring the Kafka security model: `Create`, `Write`, `Read`, `Delete`, `Describe`, `Alter`, `DescribeConfigs`, `AlterConfigs`, `ClusterAction`, `IdempotentWrite`.

##### Resources

@@ -790,7 +810,7 @@ A few examples:
    Group:orders-*
    Cluster:*

-In addition, the general pattern can be prefixed by another one of the format `kafka-cluster`:CLUSTER_NAME, followed by comma, where cluster name is the name configured to `KeycloakRBACAuthorizer` using `strimzi.authorization.kafka.cluster.name`.
+In addition, the general pattern can be prefixed by another one of the format `kafka-cluster`:CLUSTER_NAME, followed by a comma, where the cluster name is the name configured to `KeycloakAuthorizer` using `strimzi.authorization.kafka.cluster.name`.

For example:

@@ -800,7 +820,7 @@ For example:
When the `kafka-cluster` prefix is not present it is assumed to be `kafka-cluster:*`.

When the resource is defined a list of possible authorization scopes relevant to the resource should be added to the list of scopes.
-Currently this needs to be added for each resource definition based on whatever actions make sense for the targeted resource type.
+Currently, this needs to be added for each resource definition based on whatever actions make sense for the targeted resource type. The Kafka security model understands the following actions on different resource types. @@ -970,7 +990,7 @@ For debug purposes you may want to properly configure which JWT token attribute This does not affect how Kafka client is presented to the Kafka Broker. The broker performs user id extraction from the token once again or it uses the Introspection Endpoint or the User Info Endpoint to get the user id. -By default the user id on the Kafka client is obtained from `sub` claim in the token - only if token is JWT. +By default, the user id on the Kafka client is obtained from `sub` claim in the token - only if token is JWT. Client side user id extraction is not possible when token is an opaque token - not JWT. You may want to explicitly specify the period the access token is considered valid. This allows you to shorten the token's lifespan. @@ -1092,7 +1112,7 @@ try { } ``` -Similarly for asynchronous API: +Similarly, for asynchronous API: ``` Producer producer = new KafkaProducer<>(props); @@ -1177,9 +1197,9 @@ You may want to explicitly set the random number implementation provider to use If you need to turn off certificate hostname verification set the following property to empty string: - `oauth.ssl.endpoint.identification.algorithm` (e.g. "") -These configuration properties can be used to configure truststore for `KeycloakRBACAuthorizer` as well, but they should be prefixed with `strimzi.authorization.` instead of `oauth.` when specifically targeting this authorizer (e.g.: `strimzi.authorization.ssl.truststore.location`). +These configuration properties can be used to configure truststore for `KeycloakAuthorizer` as well, but they should be prefixed with `strimzi.authorization.` instead of `oauth.` when specifically targeting this authorizer (e.g.: `strimzi.authorization.ssl.truststore.location`). -You may want to set these options globally as system properties or env vars to apply for all the listeners and the `KeycoakRBACAuthorizer` in which case you would use `oauth.` prefix. But when configured specifically for `KeycloakRBACAuthorizer` in `server.properties` you have to use `strimzi.authorization.` prefix. +You may want to set these options globally as system properties or env vars to apply for all the listeners and the `KeycloakAuthorizer` in which case you would use `oauth.` prefix. But when configured specifically for `KeycloakAuthorizer` in `server.properties` you have to use `strimzi.authorization.` prefix. Configuring the network timeouts for communication with authorization server @@ -1194,26 +1214,26 @@ Use the following configuration options to customize the connect and read timeou These options can be set as system properties, as env variables or as jaas properties as described in [Configuring the OAuth2](#configuring-the-oauth2). -These configuration properties can be used to configure timeouts for `KeycloakRBACAuthorizer` as well, but they should be prefixed with `strimzi.authorization.` instead of `oauth.` when specifically targeting this authorizer (e.g.: `strimzi.authorization.connect.timeout.seconds`). +These configuration properties can be used to configure timeouts for `KeycloakAuthorizer` as well, but they should be prefixed with `strimzi.authorization.` instead of `oauth.` when specifically targeting this authorizer (e.g.: `strimzi.authorization.connect.timeout.seconds`). 
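For example, to shorten both timeouts for the authorizer's requests to the token endpoint, something like the following could be added to `server.properties` (illustrative values; the `strimzi.authorization.` prefixed names mirror the `oauth.` options above):

    strimzi.authorization.connect.timeout.seconds=10
    strimzi.authorization.read.timeout.seconds=30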
-You may want to set these options globally as system properties or env vars to apply for all the listeners and the `KeycoakRBACAuthorizer` in which case you would use `oauth.` prefix. But when configured specifically for `KeycloakRBACAuthorizer` in `server.properties` you have to use `strimzi.authorization.` prefix.
+You may want to set these options globally as system properties or env vars to apply for all the listeners and the `KeycloakAuthorizer`, in which case you would use the `oauth.` prefix. But when configured specifically for `KeycloakAuthorizer` in `server.properties` you have to use the `strimzi.authorization.` prefix.

NOTE: These options are available since version 0.10.0. Before, one could only apply JDK network options `sun.net.client.defaultConnectTimeout`, and `sun.net.client.defaultReadTimeout` as described [here](https://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html), and the default was `no timeout`.

Configuring the metrics
-----------------------

-By default, the gathering and exporting of metrics is disabled. Metrics are available to get an insight into the performance and failures during token validation, authorization operations and client authentication to the authorization server. You can also monitor the authorization server requests by background services such as refreshing of JWKS keys and refreshing of grants when `KeycloakRBACAuthorizer` is used.
+By default, the gathering and exporting of metrics is disabled. Metrics are available to get an insight into the performance and failures during token validation, authorization operations and client authentication to the authorization server. You can also monitor the authorization server requests by background services such as refreshing of JWKS keys and refreshing of grants when `KeycloakAuthorizer` is used.

You can enable metrics for token validation on the Kafka broker or for client authentication on the client by setting the following JAAS option to `true`:
- `oauth.enable.metrics` (e.g.: "true")

-You can enable metrics for `KeycloakRBACAuthorizer` by setting an analogous option in Kafka broker's `server.properties` file:
+You can enable metrics for `KeycloakAuthorizer` by setting an analogous option in Kafka broker's `server.properties` file:
- `strimzi.authorization.enable.metrics` (e.g.: "true")

-If `OAUTH_ENABLE_METRICS` env variable is set or if `oauth.enable.metrics` system property is set, that will both also enable the metrics for `KeycloakRBACAuthorizer`.
+If the `OAUTH_ENABLE_METRICS` env variable is set or the `oauth.enable.metrics` system property is set, either will also enable the metrics for `KeycloakAuthorizer`.

-If `oauth.config.id` is specified in JAAS configuration of the listener or the client, it will be available in MBean / metric name as `contextId` attribute. If not specified, it will be calculated from JAAS configuration for the validator or default to `client` in client JAAS config, or `keycloak-authorizer` for KeycloakRBACAuthorizer metrics.
+If `oauth.config.id` is specified in JAAS configuration of the listener or the client, it will be available in MBean / metric name as the `contextId` attribute. If not specified, it will be calculated from the JAAS configuration for the validator, or default to `client` in client JAAS config, or `keycloak-authorizer` for KeycloakAuthorizer metrics.

Metrics are exposed through JMX managed beans. They can also be exposed as Prometheus metrics by using the Prometheus JMX Exporter agent, and mapping the JMX metrics names to prometheus metrics names.
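For instance, a sketch of enabling metrics both for the authorizer (in `server.properties`) and for token validation on a listener (as a JAAS option inside the listener's `sasl.jaas.config`):

    # server.properties - enables KeycloakAuthorizer metrics
    strimzi.authorization.enable.metrics=true

    # inside the listener's sasl.jaas.config - enables token validation metrics
    oauth.enable.metrics="true"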
@@ -1288,7 +1308,7 @@ For client-side authentication there are: The meaning of the variables used in the above names is as follows. - `$CONFIG_ID` - The value specified as `oauth.config.id` configuration option. If not specified it is set to `client` for Kafka client, `kafka-authorizer` for `KeycloakRBACAuthorizer`, or calculated from other configuration parameters for the validation on Kafka broker. + The value specified as `oauth.config.id` configuration option. If not specified it is set to `client` for Kafka client, `kafka-authorizer` for `KeycloakAuthorizer`, or calculated from other configuration parameters for the validation on Kafka broker. - `$HOST:$PORT` The hostname and port used to connect to authorization server. Extracted from the configured value for `oauth.token.endpoint.uri`, `oauth.introspect.endpoint.uri`, `oauth.userinfo.endpoint.uri`, `oauth.jwks.endpoint.uri` or `strimzi.authorization.token.endpoint.uri` (depending on the context). If the port is not part of the uri it is defaulted to `80` for `http`, and to `443` for `https`. - `$PATH` @@ -1388,7 +1408,7 @@ Troubleshooting There are many reasons the token can be invalid, and rejected by Kafka broker during authentication. -Here is a check list of most common problems: +Here is a checklist of most common problems: * The client should use the same HOST and PORT when connecting to the Authorization Server as the Kafka broker. @@ -1412,4 +1432,4 @@ The JWT tokens are signed by the authorization server when they are issued. The The client may have obtained a new access token, but the Kafka broker has not yet refreshed the public keys from JWKS endpoint resulting in a mismatch. The Kafka Broker will automatically refresh JWT keys if it encounters an unknown `kid`, and the problem will self-correct in this case, you may just need to repeat your request a few times. -It can also happen the other way around. Your existing client may still use the refresh token or the access token issued by the previous authorization server instance while the Kafka broker has already refreshed the keys from JWKS endpoint - resulting in a mismatch between the private key used by authorization server to sign the token, and the published public keys (JWKS endpoint). Since the problem is on the client you may need to configure your client with a newly obtained refresh token, or access token. If you configure your client with clientId and secret, it should auto-correct by itself, you just need to restart it. +It can also happen the other way around. Your existing client may still use the refresh token or the access token issued by the previous authorization server instance while the Kafka broker has already refreshed the keys from JWKS endpoint - resulting in a mismatch between the private key used by authorization server to sign the token, and the published public keys (JWKS endpoint). Since the problem is on the client you may need to configure your client with a newly obtained refresh token, or access token. If you configure your client with clientId and secret, it should autocorrect by itself, you just need to restart it. 
diff --git a/examples/docker/kafka-oauth-strimzi/compose-authz-kraft.yml b/examples/docker/kafka-oauth-strimzi/compose-authz-kraft.yml new file mode 100644 index 00000000..f2e1f2e7 --- /dev/null +++ b/examples/docker/kafka-oauth-strimzi/compose-authz-kraft.yml @@ -0,0 +1,94 @@ +version: '3.5' + +services: + + #################################### KAFKA BROKER #################################### + kafka: + image: strimzi/example-kafka + build: kafka-oauth-strimzi/kafka/target + container_name: kafka + ports: + - 9091:9091 + - 9092:9092 + + # javaagent debug port + #- 5006:5006 + command: + - /bin/bash + - -c + - cd /opt/kafka && ./start.sh --kraft + + environment: + + # Java Debug + #KAFKA_DEBUG: y + #DEBUG_SUSPEND_FLAG: y + #JAVA_DEBUG_PORT: 5006 + + # + # KAFKA Configuration + # + LOG_DIR: /home/kafka/logs + KAFKA_PROCESS_ROLES: "broker,controller" + KAFKA_NODE_ID: "1" + KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka:9091" + KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER + KAFKA_SASL_MECHANISM_CONTROLLER_PROTOCOL: PLAIN + + KAFKA_LISTENERS: "CONTROLLER://kafka:9091,CLIENT://kafka:9092" + KAFKA_ADVERTISED_LISTENERS: "CLIENT://kafka:9092" + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:SASL_PLAINTEXT,CLIENT:SASL_PLAINTEXT" + + KAFKA_SASL_ENABLED_MECHANISMS: OAUTHBEARER + KAFKA_INTER_BROKER_LISTENER_NAME: CLIENT + KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: OAUTHBEARER + + KAFKA_PRINCIPAL_BUILDER_CLASS: io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder + + KAFKA_LISTENER_NAME_CONTROLLER_SASL_ENABLED_MECHANISMS: PLAIN + KAFKA_LISTENER_NAME_CONTROLLER_PLAIN_SASL_JAAS_CONFIG: "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin-password\" user_admin=\"admin-password\" user_bobby=\"bobby-secret\" ;" + + KAFKA_LISTENER_NAME_CLIENT_SASL_ENABLED_MECHANISMS: OAUTHBEARER + KAFKA_LISTENER_NAME_CLIENT_OAUTHBEARER_SASL_JAAS_CONFIG: "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required;" + KAFKA_LISTENER_NAME_CLIENT_OAUTHBEARER_SASL_LOGIN_CALLBACK_HANDLER_CLASS: io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler + KAFKA_LISTENER_NAME_CLIENT_OAUTHBEARER_SASL_SERVER_CALLBACK_HANDLER_CLASS: io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler + + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + + KAFKA_LISTENER_NAME_CLIENT_OAUTHBEARER_CONNECTIONS_MAX_REAUTH_MS: 3600000 + + # + # Strimzi OAuth Configuration + # + + # Authentication config + OAUTH_CLIENT_ID: "kafka" + OAUTH_CLIENT_SECRET: "kafka-secret" + OAUTH_TOKEN_ENDPOINT_URI: "http://${KEYCLOAK_HOST:-keycloak}:8080/auth/realms/${REALM:-kafka-authz}/protocol/openid-connect/token" + + # Validation config + OAUTH_VALID_ISSUER_URI: "http://${KEYCLOAK_HOST:-keycloak}:8080/auth/realms/${REALM:-kafka-authz}" + OAUTH_JWKS_ENDPOINT_URI: "http://${KEYCLOAK_HOST:-keycloak}:8080/auth/realms/${REALM:-kafka-authz}/protocol/openid-connect/certs" + #OAUTH_INTROSPECTION_ENDPOINT_URI: "http://${KEYCLOAK_HOST:-keycloak}:8080/auth/realms/${REALM:-kafka-authz}/protocol/openid-connect/token/introspect" + + # username extraction from JWT token claim + OAUTH_USERNAME_CLAIM: preferred_username + + # timeouts / refresh config + OAUTH_JWKS_REFRESH_MIN_PAUSE_SECONDS: "5" + OAUTH_CONNECT_TIMEOUT_SECONDS: "20" + OAUTH_READ_TIMEOUT_SECONDS: "20" + + # Keycloak authorization + KAFKA_AUTHORIZER_CLASS_NAME: io.strimzi.kafka.oauth.server.authorizer.KeycloakAuthorizer + + KAFKA_STRIMZI_AUTHORIZATION_KAFKA_CLUSTER_NAME: my-cluster + KAFKA_STRIMZI_AUTHORIZATION_DELEGATE_TO_KAFKA_ACL: "true" 
+ KAFKA_STRIMZI_AUTHORIZATION_GRANTS_REFRESH_POOL_SIZE: "4" + KAFKA_STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS: "60" + + KAFKA_SUPER_USERS: "User:admin;User:service-account-kafka-broker" + + # For start.sh script to know where the keycloak is listening + KEYCLOAK_HOST: ${KEYCLOAK_HOST:-keycloak} + REALM: ${REALM:-kafka-authz} diff --git a/examples/docker/kafka-oauth-strimzi/compose-authz.yml b/examples/docker/kafka-oauth-strimzi/compose-authz.yml index 814289d3..7c96f243 100644 --- a/examples/docker/kafka-oauth-strimzi/compose-authz.yml +++ b/examples/docker/kafka-oauth-strimzi/compose-authz.yml @@ -50,7 +50,7 @@ services: KAFKA_AUTHORIZER_CLASS_NAME: io.strimzi.kafka.oauth.server.OAuthSessionAuthorizer KAFKA_PRINCIPAL_BUILDER_CLASS: io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder - KAFKA_STRIMZI_AUTHORIZER_DELEGATE_CLASS_NAME: io.strimzi.kafka.oauth.server.authorizer.KeycloakRBACAuthorizer + KAFKA_STRIMZI_AUTHORIZER_DELEGATE_CLASS_NAME: io.strimzi.kafka.oauth.server.authorizer.KeycloakAuthorizer KAFKA_STRIMZI_AUTHORIZATION_KAFKA_CLUSTER_NAME: my-cluster KAFKA_STRIMZI_AUTHORIZATION_DELEGATE_TO_KAFKA_ACL: "true" diff --git a/examples/docker/kafka-oauth-strimzi/compose-kraft.yml b/examples/docker/kafka-oauth-strimzi/compose-kraft.yml new file mode 100644 index 00000000..5d817551 --- /dev/null +++ b/examples/docker/kafka-oauth-strimzi/compose-kraft.yml @@ -0,0 +1,81 @@ +version: '3.5' + +services: + + #################################### KAFKA BROKER #################################### + kafka: + image: strimzi/example-kafka + build: kafka-oauth-strimzi/kafka/target + container_name: kafka + ports: + - 9091:9091 + - 9092:9092 + + # javaagent debug port + #- 5005:5005 + command: + - /bin/bash + - -c + - cd /opt/kafka && ./start.sh --kraft + + environment: + + # Java Debug + #KAFKA_DEBUG: y + #DEBUG_SUSPEND_FLAG: y + #JAVA_DEBUG_PORT: 5005 + + # + # KAFKA Configuration + # + LOG_DIR: /home/kafka/logs + KAFKA_PROCESS_ROLES: "broker,controller" + KAFKA_NODE_ID: "1" + KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka:9091" + KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER + KAFKA_SASL_MECHANISM_CONTROLLER_PROTOCOL: PLAIN + + KAFKA_LISTENERS: "CONTROLLER://kafka:9091,CLIENT://kafka:9092" + KAFKA_ADVERTISED_LISTENERS: "CLIENT://kafka:9092" + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:SASL_PLAINTEXT,CLIENT:SASL_PLAINTEXT" + + KAFKA_INTER_BROKER_LISTENER_NAME: CLIENT + KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: OAUTHBEARER + + KAFKA_PRINCIPAL_BUILDER_CLASS: "io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder" + + KAFKA_LISTENER_NAME_CONTROLLER_SASL_ENABLED_MECHANISMS: PLAIN + KAFKA_LISTENER_NAME_CONTROLLER_PLAIN_SASL_JAAS_CONFIG: "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin-password\" user_admin=\"admin-password\" user_bobby=\"bobby-secret\" ;" + + KAFKA_LISTENER_NAME_CLIENT_SASL_ENABLED_MECHANISMS: OAUTHBEARER + KAFKA_LISTENER_NAME_CLIENT_OAUTHBEARER_SASL_JAAS_CONFIG: "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required;" + KAFKA_LISTENER_NAME_CLIENT_OAUTHBEARER_SASL_LOGIN_CALLBACK_HANDLER_CLASS: io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler + KAFKA_LISTENER_NAME_CLIENT_OAUTHBEARER_SASL_SERVER_CALLBACK_HANDLER_CLASS: io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler + + KAFKA_SUPER_USERS: "User:admin,User:service-account-kafka-broker" + + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + + + # + # Strimzi OAuth Configuration + # + + # Authentication config + 
OAUTH_CLIENT_ID: "kafka-broker" + OAUTH_CLIENT_SECRET: "kafka-broker-secret" + OAUTH_TOKEN_ENDPOINT_URI: "http://${KEYCLOAK_HOST:-keycloak}:8080/auth/realms/${REALM:-demo}/protocol/openid-connect/token" + + # Validation config + OAUTH_VALID_ISSUER_URI: "http://${KEYCLOAK_HOST:-keycloak}:8080/auth/realms/${REALM:-demo}" + OAUTH_JWKS_ENDPOINT_URI: "http://${KEYCLOAK_HOST:-keycloak}:8080/auth/realms/${REALM:-demo}/protocol/openid-connect/certs" + #OAUTH_INTROSPECTION_ENDPOINT_URI: "http://${KEYCLOAK_HOST:-keycloak}:8080/auth/realms/${REALM:-demo}/protocol/openid-connect/token/introspect" + + + # username extraction from JWT token claim + OAUTH_USERNAME_CLAIM: preferred_username + OAUTH_CONNECT_TIMEOUT_SECONDS: "20" + + # For start.sh script to know where the keycloak is listening + KEYCLOAK_HOST: ${KEYCLOAK_HOST:-keycloak} + REALM: ${REALM:-demo} diff --git a/examples/docker/kafka-oauth-strimzi/kafka/config/log4j.properties b/examples/docker/kafka-oauth-strimzi/kafka/config/log4j.properties index 59521c2f..de53d7f8 100644 --- a/examples/docker/kafka-oauth-strimzi/kafka/config/log4j.properties +++ b/examples/docker/kafka-oauth-strimzi/kafka/config/log4j.properties @@ -62,11 +62,11 @@ log4j.logger.org.I0Itec.zkclient.ZkClient=INFO log4j.logger.org.apache.zookeeper=INFO # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) -log4j.logger.kafka=DEBUG -log4j.logger.org.apache.kafka=DEBUG +log4j.logger.kafka=INFO +log4j.logger.org.apache.kafka=INFO # Control Strimzi OAuth logging -log4j.logger.io.strimzi=TRACE +log4j.logger.io.strimzi=DEBUG # Change to DEBUG or TRACE to enable request logging log4j.logger.kafka.request.logger=WARN, requestAppender diff --git a/examples/docker/kafka-oauth-strimzi/kafka/simple_kafka_config.sh b/examples/docker/kafka-oauth-strimzi/kafka/simple_kafka_config.sh index 2ae04f26..48ad9c60 100755 --- a/examples/docker/kafka-oauth-strimzi/kafka/simple_kafka_config.sh +++ b/examples/docker/kafka-oauth-strimzi/kafka/simple_kafka_config.sh @@ -52,17 +52,37 @@ done # # Generate output # -echo "#" -echo "# strimzi.properties" -echo "#" -echo broker.id=`pop_value broker.id 0` +if [[ "$1" == "--kraft" ]]; then + # + # Output kraft version of server.properties + # + echo "#" + echo "# strimzi.properties (kraft)" + echo "#" + + echo process.roles=`pop_value process.roles broker,controller` + echo node.id=`pop_value node.id 1` + echo log.dirs=`pop_value log.dirs /tmp/kraft-combined-logs` + +elif [[ "$1" == "" ]]; then + echo "#" + echo "# strimzi.properties" + echo "#" + + echo broker.id=`pop_value broker.id 0` + echo log.dirs=`pop_value log.dirs /tmp/kafka-logs` + echo group.initial.rebalance.delay.ms=`pop_value group.initial.rebalance.delay.ms 0` +else + echo "Unsupported argument: $1" + exit 1 +fi + echo num.network.threads=`pop_value num.network.threads 3` echo num.io.threads=`pop_value num.io.threads 8` echo socket.send.buffer.bytes=`pop_value socket.send.buffer.bytes 102400` echo socket.receive.buffer.bytes=`pop_value socket.receive.buffer.bytes 102400` echo socket.request.max.bytes=`pop_value socket.request.max.bytes 104857600` -echo log.dirs=`pop_value log.dirs /tmp/kafka-logs` echo num.partitions=`pop_value num.partitions 1` echo num.recovery.threads.per.data.dir=`pop_value num.recovery.threads.per.data.dir 1` echo offsets.topic.replication.factor=`pop_value offsets.topic.replication.factor 1` @@ -71,9 +91,6 @@ echo transaction.state.log.min.isr=`pop_value transaction.state.log.min.isr 1` echo log.retention.hours=`pop_value 
log.retention.hours 168` echo log.segment.bytes=`pop_value log.segment.bytes 1073741824` echo log.retention.check.interval.ms=`pop_value log.retention.check.interval.ms 300000` -echo zookeeper.connect=`pop_value zookeeper.connect localhost:2181` -echo zookeeper.connection.timeout.ms=`pop_value zookeeper.connection.timeout.ms 6000` -echo group.initial.rebalance.delay.ms=`pop_value group.initial.rebalance.delay.ms 0` # # Add what remains of KAFKA_* env vars diff --git a/examples/docker/kafka-oauth-strimzi/kafka/start.sh b/examples/docker/kafka-oauth-strimzi/kafka/start.sh index 12d1a4b2..9c483c8c 100755 --- a/examples/docker/kafka-oauth-strimzi/kafka/start.sh +++ b/examples/docker/kafka-oauth-strimzi/kafka/start.sh @@ -14,12 +14,17 @@ wait_for_url "$URI/realms/${REALM:-demo}" "Waiting for realm '${REALM}' to be av if [ "$SERVER_PROPERTIES_FILE" == "" ]; then echo "Generating a new strimzi.properties file using ENV vars" - ./simple_kafka_config.sh | tee /tmp/strimzi.properties + ./simple_kafka_config.sh $1 | tee /tmp/strimzi.properties else echo "Using provided server.properties file: $SERVER_PROPERTIES_FILE" cp $SERVER_PROPERTIES_FILE /tmp/strimzi.properties fi +if [[ "$1" == "--kraft" ]]; then + KAFKA_CLUSTER_ID="$(/opt/kafka/bin/kafka-storage.sh random-uuid)" + /opt/kafka/bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c /tmp/strimzi.properties +fi + # add Strimzi kafka-oauth-* jars and their dependencies to classpath export CLASSPATH="/opt/kafka/libs/strimzi/*:$CLASSPATH" diff --git a/examples/docker/spring/Dockerfile b/examples/docker/spring/Dockerfile index d2f18066..1392d44c 100644 --- a/examples/docker/spring/Dockerfile +++ b/examples/docker/spring/Dockerfile @@ -1,4 +1,4 @@ -FROM adoptopenjdk/openjdk11:ubi-jre +FROM registry.access.redhat.com/ubi8/openjdk-17 ENTRYPOINT ["java", "-jar", "/usr/share/oauth/server.jar"] diff --git a/examples/docker/spring/pom.xml b/examples/docker/spring/pom.xml index b81812da..7bc54608 100644 --- a/examples/docker/spring/pom.xml +++ b/examples/docker/spring/pom.xml @@ -43,6 +43,11 @@ jackson-databind 2.13.4.2 + + com.fasterxml.jackson.core + jackson-core + 2.13.4 + org.bouncycastle bcprov-jdk15on diff --git a/examples/docker/spring/test-spring.sh b/examples/docker/spring/test-spring.sh new file mode 100755 index 00000000..530bcb1e --- /dev/null +++ b/examples/docker/spring/test-spring.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -x + +docker rm -f spring +docker run -d --name spring strimzi/example-spring +for i in {1..10} +do + sleep 1 + RESULT=$(docker logs spring | grep "Started SimpleAuthorizationServerApplication") + if [ "$RESULT" != "" ]; then + docker rm -f spring + exit 0 + fi +done + +echo "Failed to start Spring example" +docker logs spring +exit 1 diff --git a/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/BearerTokenWithPayload.java b/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/BearerTokenWithPayload.java index 56820f14..f9ffa6a1 100644 --- a/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/BearerTokenWithPayload.java +++ b/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/BearerTokenWithPayload.java @@ -4,6 +4,7 @@ */ package io.strimzi.kafka.oauth.common; +import com.fasterxml.jackson.databind.JsonNode; import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; import com.fasterxml.jackson.databind.node.ObjectNode; import java.util.Set; @@ -11,31 +12,31 @@ /** * This extension of OAuthBearerToken provides a way to associate any additional information with the token * at run time, that is 
cached for the duration of the client session.
-     *
+     * <p>
 * This class is the only notion of client session that we can get. Kafka code holds on to it for as long as the session is alive,
 * and then the object can be garbage collected.
-     *
+     * <p>
 * Successful re-authentication starts a new session without disconnecting the current connection, avoiding the need to re-establish
 * any existing TLS connection for example.
-     *
+     * <p>
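+ * For illustration of the 'payload' methods described below (not part of this interface's contract), a broker-side
+ * plugin might cache per-session data like this, with {@code fetchGrants} being a hypothetical helper:
+ * <pre>{@code
+ * JsonNode grants = token.getPayload();       // previously cached value, or null
+ * if (grants == null) {
+ *     grants = fetchGrants(token.value());    // fetch once per session (hypothetical helper)
+ *     token.setPayload(grants);               // cache for subsequent authorize() calls
+ * }
+ * }</pre>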
 * Token is instantiated during authentication, but the 'payload' methods can be accessed later by custom extensions.
 * For example, it can be used by a custom authorizer to cache a parsed JWT token payload or to cache authorization grants for the current session.
 */
public interface BearerTokenWithPayload extends OAuthBearerToken {

    /**
-     * Get the usage dependent object previously associated with this instance by calling {@link BearerTokenWithPayload#setPayload(Object)}
+     * Get the usage dependent object previously associated with this instance by calling {@link BearerTokenWithPayload#setPayload(com.fasterxml.jackson.databind.JsonNode)}
     *
     * @return The associated object
     */
-    Object getPayload();
+    JsonNode getPayload();

    /**
     * Associate a usage dependent object with this instance
     *
     * @param payload The object to associate with this instance
     */
-    void setPayload(Object payload);
+    void setPayload(JsonNode payload);

    /**
     * Get groups associated with this token (principal).
@@ -50,7 +51,7 @@ public interface BearerTokenWithPayload extends OAuthBearerToken {
     *
     * @return Token content / details as a JSON object
     */
-    ObjectNode getJSON();
+    ObjectNode getClaimsJSON();

    /**
     * This method returns an id of the current instance of this object.
diff --git a/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/Config.java b/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/Config.java
index 13d00513..052a4753 100644
--- a/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/Config.java
+++ b/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/Config.java
@@ -84,6 +84,8 @@ public class Config {

    private Map defaults;

+    Config delegate;
+
    /**
     * Use this constructor if you only want to look up configuration in system properties and env
     * without any default configuration.
@@ -104,6 +106,17 @@ public Config(Properties p) {
        ));
    }

+    /**
+     * Use this constructor if you want to wrap another Config object and override some functionality
+     * <p>
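+     * A minimal sketch of such a wrapper (illustrative only; {@code extraLookup} is a hypothetical helper):
+     * <pre>{@code
+     * Config wrapped = new Config(original) {
+     *     @Override
+     *     public String getValue(String key, String fallback) {
+     *         String value = extraLookup(key);   // consult an extra source first
+     *         return value != null ? value : original.getValue(key, fallback);
+     *     }
+     * };
+     * }</pre>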
+     * As the sketch above shows, you only need to override {@link #getValue(String, String)} in your extending class.
+     *
+     * @param delegate The Config object to delegate to
+     */
+    public Config(Config delegate) {
+        this.delegate = delegate;
+    }
+
    /**
     * Use this constructor to provide default values in case some configuration is not set through system properties or ENV.
     *
@@ -115,7 +128,7 @@ public Config(Map p) {

    /**
     * Validate configuration by checking for unknown or missing properties.
-     *
+     * <p>
     * Override this method to provide custom validation.
     *
     * @throws RuntimeException if validation fails
     */
@@ -124,15 +137,15 @@ public void validate() {}

    /**
     * Get value for property key, returning fallback value if configuration for key is not found.
-     *
+     * <p>
     * This method first checks if system property exists for the key.
     * If not, it checks if env variable exists with the name derived from the key:
-     *
+     * <pre>
      *   key.toUpperCase().replace('-', '_').replace('.', '_');
-     *
+     * </pre>
     * If not, it checks if env variable with name equal to key exists.
     * Ultimately, it checks the defaults passed at Config object construction time.
-     *
+     * <p>
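+     * For example (an illustrative walk-through of the order above, not additional behavior), a lookup of the
+     * key {@code oauth.client.id} checks, in order:
+     * <pre>
+     *   System.getProperty("oauth.client.id")
+     *   System.getenv("OAUTH_CLIENT_ID")
+     *   System.getenv("oauth.client.id")
+     *   the defaults passed at construction time
+     * </pre>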
* If no configuration is found for key, it returns the fallback value. * * @param key Config key @@ -140,6 +153,9 @@ public void validate() {} * @return Configuration value for specified key */ public String getValue(String key, String fallback) { + if (delegate != null) { + return delegate.getValue(key, fallback); + } // try system properties first String result = System.getProperty(key, null); @@ -174,7 +190,7 @@ public String getValue(String key, String fallback) { * @param key Config key * @return Config value */ - public String getValue(String key) { + public final String getValue(String key) { return getValue(key, null); } @@ -185,7 +201,7 @@ public String getValue(String key) { * @param fallback Fallback value * @return Config value */ - public int getValueAsInt(String key, int fallback) { + public final int getValueAsInt(String key, int fallback) { String result = getValue(key); return result != null ? Integer.parseInt(result) : fallback; } @@ -197,21 +213,21 @@ public int getValueAsInt(String key, int fallback) { * @param fallback Fallback value * @return Config value */ - public long getValueAsLong(String key, long fallback) { + public final long getValueAsLong(String key, long fallback) { String result = getValue(key); return result != null ? Long.parseLong(result) : fallback; } /** * Get value for property key as boolean or fallback value if not found - * + *
<p>
     * Valid values are: "true", "false", "yes", "no", "y", "n", "1", "0"
     *
     * @param key Config key
     * @param fallback Fallback value
     * @return Config value
     */
-    public boolean getValueAsBoolean(String key, boolean fallback) {
+    public final boolean getValueAsBoolean(String key, boolean fallback) {
        String result = getValue(key);
        try {
            return result != null ? isTrue(result) : fallback;
@@ -226,7 +242,7 @@ public boolean getValueAsBoolean(String key, boolean fallback) {
     * @param key Config key
     * @return Config value
     */
-    public URI getValueAsURI(String key) {
+    public final URI getValueAsURI(String key) {
        String result = getValue(key);
        try {
            return URI.create(result);
@@ -254,7 +270,7 @@ public static boolean isTrue(String result) {

    /**
     * Convert property key to env key.
-     *
+     * <p>
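+     * As described next, the conversion uppercases the key and maps '.' and '-' to '_'; for example,
+     * {@code strimzi.authorization.token.endpoint.uri} becomes {@code STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI}.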
* Property key is converted to all uppercase, then all '.' and '-' characters are converted to '_' * * @param key A key of a property which should be converted to environment variable name diff --git a/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/ConfigUtil.java b/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/ConfigUtil.java index 382757ba..500a3f07 100644 --- a/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/ConfigUtil.java +++ b/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/ConfigUtil.java @@ -9,6 +9,7 @@ import javax.net.ssl.HostnameVerifier; import javax.net.ssl.SSLSocketFactory; +import java.util.List; import java.util.Properties; /** @@ -68,7 +69,7 @@ public static void putIfNotNull(Properties p, String key, Object value) { * @return Configured value as int */ public static int getConnectTimeout(Config config) { - return getTimeout(config, Config.OAUTH_CONNECT_TIMEOUT_SECONDS); + return getTimeoutAppendingWarnings(config, Config.OAUTH_CONNECT_TIMEOUT_SECONDS, null); } /** @@ -78,20 +79,27 @@ public static int getConnectTimeout(Config config) { * @return Configured value as int */ public static int getReadTimeout(Config config) { - return getTimeout(config, Config.OAUTH_READ_TIMEOUT_SECONDS); + return getTimeoutAppendingWarnings(config, Config.OAUTH_READ_TIMEOUT_SECONDS, null); } /** * Resolve the configuration value for the key as a timeout in seconds with the default value of 60 * - * @param c the Config object - * @param key the configuration key + * @param c the Config object + * @param key the configuration key + * @param warnings a warnings list where any warnings should be added, and will later be logged as WARN + * * @return Configured value as int */ - public static int getTimeout(Config c, String key) { + public static int getTimeoutAppendingWarnings(Config c, String key, List warnings) { int timeout = c.getValueAsInt(key, 60); if (timeout <= 0) { - log.warn("The configured value of `" + key + "` (" + timeout + ") is <= 0 and will be ignored. Default used: 60 seconds"); + String msg = "The configured value of `" + key + "` (" + timeout + ") is <= 0 and will be ignored. 
Default used: 60 seconds"; + if (warnings != null) { + warnings.add(msg); + } else { + log.warn(msg); + } timeout = 60; } return timeout; @@ -104,14 +112,15 @@ public static int getTimeout(Config c, String key) { * @param c the Config object * @param key the configuration key * @param fallbackKey the fallback key + * @param warnings a warnings list where any warnings should be added, and will later be logged as WARN * @return Configured value as int */ - public static int getTimeoutConfigWithFallbackLookup(Config c, String key, String fallbackKey) { + public static int getTimeoutConfigWithFallbackLookup(Config c, String key, String fallbackKey, List warnings) { String result = c.getValue(key); if (result == null) { - return getTimeout(c, fallbackKey); + return getTimeoutAppendingWarnings(c, fallbackKey, warnings); } - return getTimeout(c, key); + return getTimeoutAppendingWarnings(c, key, warnings); } /** diff --git a/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/JSONUtil.java b/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/JSONUtil.java index a4a81561..a60803fd 100644 --- a/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/JSONUtil.java +++ b/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/JSONUtil.java @@ -6,6 +6,7 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import java.io.ByteArrayOutputStream; @@ -14,7 +15,10 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -166,4 +170,41 @@ public static List asListOfString(JsonNode arrayOrString, String delimit return result; } + + /** + * Set an array attribute on a JSON object to a collection of Strings + * + * @param target The target JSON object + * @param attrName An attribute name + * @param elements The collection of strings + * @return Newly created ArrayNode + */ + public static ArrayNode setArrayOfStringsIfNotNull(JsonNode target, String attrName, Collection elements) { + if (elements == null) { + return null; + } + if (!(target instanceof ObjectNode)) { + throw new IllegalArgumentException("Unexpected JSON Node type (not ObjectNode): " + target.getClass()); + } + + ArrayNode list = ((ObjectNode) target).putArray(attrName); + for (String g: elements) { + list.add(g); + } + return list; + } + + /** + * Convert a JSON array to a Set object + * + * @param list ArrayNode to convert + * @return Set containing the elements of the ArrayNode + */ + public static Set asSetOfNodes(ArrayNode list) { + HashSet result = new HashSet<>(); + for (JsonNode node: list) { + result.add(node); + } + return result; + } } diff --git a/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/TokenInfo.java b/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/TokenInfo.java index 4d254072..20710c1c 100644 --- a/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/TokenInfo.java +++ b/oauth-common/src/main/java/io/strimzi/kafka/oauth/common/TokenInfo.java @@ -11,11 +11,12 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.Objects; import java.util.Set; /** * TokenInfo encapsulates the information about the access token. - * + *
<p>
* It can also be used for storing extra application information associated with the access token by directly * accessing the payload JSON object. */ @@ -61,7 +62,7 @@ public class TokenInfo { /** * Create a new instance * - * @param payload The body of the JWT token or composed from authorization server's introspection endpoint response + * @param payload The body of the JWT token or composed of authorization server's introspection endpoint response * @param token The raw access token * @param principal The extracted user ID */ @@ -72,7 +73,7 @@ public TokenInfo(JsonNode payload, String token, String principal) { /** * Create a new instance * - * @param payload The body of the JWT token or composed from authorization server's introspection endpoint response + * @param payload The body of the JWT token or composed of authorization server's introspection endpoint response * @param token The raw access token * @param principal The extracted user ID * @param groups A set of groups extracted from JWT token or authorization server's inspect endpoint response @@ -105,14 +106,44 @@ public TokenInfo(JsonNode payload, String token, String principal, Set g * @param expiresAtMs The token's `expires at` time in millis */ public TokenInfo(String token, String scope, String principal, Set groups, long issuedAtMs, long expiresAtMs) { + this(token, + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(scope != null ? scope.split(" ") : new String[0]))), + principal, + groups, + issuedAtMs, + expiresAtMs, + null); + } + + /** + * + * @param token The raw access token + * @param scopes The list of scopes + * @param principal The extracted user ID + * @param groups A set of groups extracted from JWT token or authorization server's inspect endpoint response + * @param issuedAtMs The token's `issued at` time in millis + * @param expiresAtMs The token's `expires at` time in millis + * @param payload The body of the JWT token or composed of authorization server's introspection endpoint response + */ + @SuppressFBWarnings("EI_EXPOSE_REP2") + // See: https://spotbugs.readthedocs.io/en/stable/bugDescriptions.html#ei2-may-expose-internal-representation-by-incorporating-reference-to-mutable-object-ei-expose-rep2 + public TokenInfo(String token, Set scopes, String principal, Set groups, long issuedAtMs, long expiresAtMs, JsonNode payload) { + if (token == null) { + throw new IllegalArgumentException("token can't be null"); + } + if (principal == null) { + throw new IllegalArgumentException("principal can't be null"); + } this.token = token; this.principal = principal; this.groups = groups != null ? Collections.unmodifiableSet(groups) : null; this.issuedAt = issuedAtMs; this.expiresAt = expiresAtMs; - - String[] parsedScopes = scope != null ? scope.split(" ") : new String[0]; - scopes = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(parsedScopes))); + this.scopes = scopes; + if (payload != null && !(payload instanceof ObjectNode)) { + throw new IllegalArgumentException("Unexpected JSON Node type (not ObjectNode): " + payload.getClass()); + } + this.payload = (ObjectNode) payload; } /** @@ -177,7 +208,7 @@ public long issuedAtMs() { /** * Get the payload object passed during construction. - * + *
<p>
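+     * As the next paragraph notes, the returned instance is live, so custom attributes can be attached during
+     * request processing; for example (an illustrative sketch):
+     * <pre>{@code
+     * ObjectNode payload = tokenInfo.payload();
+     * if (payload != null) {
+     *     payload.put("custom.attribute", "value");   // visible wherever this payload is read later
+     * }
+     * }</pre>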
* The same instance, passed to the TokenInfo constructor is returned which makes it possible to add custom attributes * or make modifications during request processing. * @@ -187,4 +218,23 @@ public long issuedAtMs() { public ObjectNode payload() { return payload; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof TokenInfo)) return false; + TokenInfo tokenInfo = (TokenInfo) o; + return expiresAt == tokenInfo.expiresAt && + issuedAt == tokenInfo.issuedAt && + token.equals(tokenInfo.token) && + principal.equals(tokenInfo.principal) && + Objects.equals(groups, tokenInfo.groups) && + Objects.equals(scopes, tokenInfo.scopes) && + Objects.equals(payload, tokenInfo.payload); + } + + @Override + public int hashCode() { + return Objects.hash(token, expiresAt, principal, groups, issuedAt, scopes, payload); + } } diff --git a/oauth-common/src/main/java/io/strimzi/kafka/oauth/services/Sessions.java b/oauth-common/src/main/java/io/strimzi/kafka/oauth/services/Sessions.java index f5b3e444..be9a1c10 100644 --- a/oauth-common/src/main/java/io/strimzi/kafka/oauth/services/Sessions.java +++ b/oauth-common/src/main/java/io/strimzi/kafka/oauth/services/Sessions.java @@ -5,6 +5,8 @@ package io.strimzi.kafka.oauth.services; import io.strimzi.kafka.oauth.common.BearerTokenWithPayload; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.ArrayList; import java.util.Collections; @@ -13,14 +15,21 @@ import java.util.WeakHashMap; import java.util.concurrent.ExecutorService; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static io.strimzi.kafka.oauth.common.LogUtil.mask; /** * Sessions entries should automatically get cleared as KafkaPrincipals for the sessions get garbage collected by JVM. * The size of `activeSessions` at any moment in time should be about the number of currently active sessions. + * They may also get removed by broker-side plugins like custom authorizers when it is determined that the token has expired. */ public class Sessions { + private static final Logger log = LoggerFactory.getLogger(Sessions.class); + private static final Object NONE = new Object(); /** @@ -49,6 +58,28 @@ public void remove(BearerTokenWithPayload token) { activeSessions.remove(token); } + /** + * Remove all the active sessions for the passed access token + * + * @param accessToken access token for which to remove the sessions + */ + public void removeAllWithMatchingAccessToken(String accessToken) { + // In order to prevent the possible ConcurrentModificationException in the middle of using an iterator + // we first make a local copy, then iterate over the copy + ArrayList values = new ArrayList<>(activeSessions.keySet()); + + // Remove matching + for (BearerTokenWithPayload token: values) { + if (accessToken.equals(token.value())) { + activeSessions.remove(token); + if (log.isDebugEnabled()) { + log.debug("Removed invalid session from sessions map (userId: {}, session: {}, token: {})", + token.principalName(), token.getSessionId(), mask(token.value())); + } + } + } + } + /** * Iterate over all active sessions (represented by stored token objects) applying a filter and submit a task to the passed executor for each passing token. * Return a list of {@link SessionFuture} instances. @@ -96,21 +127,18 @@ public void cleanupExpired() { } /** - * Iterate over sessions, and find the first element matching the filter. 
+     * Get a list of objects retrieved by applying the passed mapping function to the current active sessions set.
+     * <p>
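+     * A sketch of such a mapping call (illustrative; assumes a {@code Sessions} instance is at hand):
+     * <pre>{@code
+     * List<String> principals = sessions.map(BearerTokenWithPayload::principalName);
+     * }</pre>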
+     * For example, you can get a list of all the principal names, as the sketch above does.
     *
-     * @param filter A filter to apply
-     * @return The first matching session
+     * @param mapper A mapping function
+     * @return A list of mapped results
+     * @param <T> A return type
     */
-    public BearerTokenWithPayload findFirst(Predicate<BearerTokenWithPayload> filter) {
-        // In order to prevent the possible ConcurrentModificationException in the middle of using an iterator
-        // we first make a local copy, then iterate over the copy
+    public <T> List<T> map(Function<BearerTokenWithPayload, T> mapper) {
+        cleanupExpired();
        ArrayList<BearerTokenWithPayload> values = new ArrayList<>(activeSessions.keySet());
-        for (BearerTokenWithPayload token: values) {
-            if (filter.test(token)) {
-                return token;
-            }
-        }
-        return null;
+        return values.stream().map(mapper).collect(Collectors.toList());
    }
}
diff --git a/oauth-keycloak-authorizer/pom.xml b/oauth-keycloak-authorizer/pom.xml
index 68ddb163..f8374af1 100644
--- a/oauth-keycloak-authorizer/pom.xml
+++ b/oauth-keycloak-authorizer/pom.xml
@@ -49,6 +49,16 @@
            <scope>provided</scope>
        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-metadata</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+            <scope>provided</scope>
+        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
diff --git a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/AuthzConfig.java b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/AuthzConfig.java
index a0933a93..e799005e 100644
--- a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/AuthzConfig.java
+++ b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/AuthzConfig.java
@@ -14,95 +14,101 @@ public class AuthzConfig extends Config {

    /**
-     * "strimzi.authorization.client.id"
+     * Client id used by authorizer when requesting grants from Keycloak Authorization Services.
     */
    public static final String STRIMZI_AUTHORIZATION_CLIENT_ID = "strimzi.authorization.client.id";

    /**
-     * "strimzi.authorization.token.endpoint.uri"
+     * Keycloak token endpoint used to fetch grants for individual access token.
     */
    public static final String STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI = "strimzi.authorization.token.endpoint.uri";

    /**
-     * "strimzi.authorization.kafka.cluster.name"
+     * The cluster name used by this configuration which can be targeted in Keycloak Authorization Services by a resource name prefix 'cluster-name:$CLUSTER_NAME,'.
     */
    public static final String STRIMZI_AUTHORIZATION_KAFKA_CLUSTER_NAME = "strimzi.authorization.kafka.cluster.name";

    /**
-     * "strimzi.authorization.delegate.to.kafka.acl"
+     * If true, the authorization decision is delegated to standard kafka ACL authorizer for non-oauth listeners and whenever
+     * the Keycloak Authorization Services grants don't result in ALLOWED permission.
     */
    public static final String STRIMZI_AUTHORIZATION_DELEGATE_TO_KAFKA_ACL = "strimzi.authorization.delegate.to.kafka.acl";

    /**
-     * "strimzi.authorization.grants.refresh.period.seconds"
+     * The time period in seconds for the background job to refresh the cached grants for active sessions. That allows changes in permissions to be detected for active sessions.
     */
    public static final String STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS = "strimzi.authorization.grants.refresh.period.seconds";

    /**
-     * "strimzi.authorization.grants.refresh.pool.size"
+     * The number of worker threads used by the background job that refreshes the grants.
*/ public static final String STRIMZI_AUTHORIZATION_GRANTS_REFRESH_POOL_SIZE = "strimzi.authorization.grants.refresh.pool.size"; /** - * "strimzi.authorization.http.retries" + * The maximum time in seconds that a grant is kept in grants cache without being accessed. It allows for active releasing of memory rather than waiting for VM's gc() to kick in. + */ + public static final String STRIMZI_AUTHORIZATION_GRANTS_MAX_IDLE_TIME_SECONDS = "strimzi.authorization.grants.max.idle.time.seconds"; + + /** + * A period in seconds for a background service that removes no-longer-used grants information from grants cache. + */ + public static final String STRIMZI_AUTHORIZATION_GRANTS_GC_PERIOD_SECONDS = "strimzi.authorization.grants.gc.period.seconds"; + + /** + * A maximum number of retries to attempt if the request to Keycloak token endpoint fails in unexpected way (connection timeout, read timeout, unexpected HTTP status code, unexpected response body). */ public static final String STRIMZI_AUTHORIZATION_HTTP_RETRIES = "strimzi.authorization.http.retries"; /** - * "strimzi.authorization.ssl.truststore.location" + * Truststore file location */ public static final String STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_LOCATION = "strimzi.authorization.ssl.truststore.location"; /** - * "strimzi.authorization.ssl.truststore.certificates" + * Trusted certificates in PEM format as alternative way to provide certs */ public static final String STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_CERTIFICATES = "strimzi.authorization.ssl.truststore.certificates"; /** - * "strimzi.authorization.ssl.truststore.password" + * Truststore password */ public static final String STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_PASSWORD = "strimzi.authorization.ssl.truststore.password"; /** - * "strimzi.authorization.ssl.truststore.type" + * Truststore type */ public static final String STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_TYPE = "strimzi.authorization.ssl.truststore.type"; /** - * "strimzi.authorization.ssl.secure.random.implementation" + * Pseudo random number generator implementation to use for HTTPS. */ public static final String STRIMZI_AUTHORIZATION_SSL_SECURE_RANDOM_IMPLEMENTATION = "strimzi.authorization.ssl.secure.random.implementation"; /** - * "strimzi.authorization.ssl.endpoint.identification.algorithm" + * Certificate checking method to use for HTTPS. */ public static final String STRIMZI_AUTHORIZATION_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM = "strimzi.authorization.ssl.endpoint.identification.algorithm"; /** - * "strimzi.authorization.connect.timeout.seconds" + * Connect timeout for connections to the token endpoint in seconds. */ public static final String STRIMZI_AUTHORIZATION_CONNECT_TIMEOUT_SECONDS = "strimzi.authorization.connect.timeout.seconds"; /** - * "strimzi.authorization.read.timeout.seconds" + * Read timeout for connections to the token endpoint in seconds. */ public static final String STRIMZI_AUTHORIZATION_READ_TIMEOUT_SECONDS = "strimzi.authorization.read.timeout.seconds"; /** - * "strimzi.authorization.enable.metrics" + * Enable authorization specific metrics. */ public static final String STRIMZI_AUTHORIZATION_ENABLE_METRICS = "strimzi.authorization.enable.metrics"; /** - * "strimzi.authorization.reuse.grants" + * Reuse cached grants for the same principal (user id) possibly fetched by another session using a different access token. 
*/ public static final String STRIMZI_AUTHORIZATION_REUSE_GRANTS = "strimzi.authorization.reuse.grants"; - /** - * Create a new instance - */ - AuthzConfig() {} - /** * Create a new instance * diff --git a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/Configuration.java b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/Configuration.java new file mode 100644 index 00000000..96e4302f --- /dev/null +++ b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/Configuration.java @@ -0,0 +1,494 @@ +/* + * Copyright 2017-2023, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.kafka.oauth.server.authorizer; + +import io.strimzi.kafka.oauth.client.ClientConfig; +import io.strimzi.kafka.oauth.common.Config; +import io.strimzi.kafka.oauth.common.ConfigException; +import io.strimzi.kafka.oauth.common.ConfigUtil; +import io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Properties; +import java.util.stream.Collectors; + +import static io.strimzi.kafka.oauth.client.ClientConfig.OAUTH_TOKEN_ENDPOINT_URI; +import static io.strimzi.kafka.oauth.common.Config.OAUTH_CONNECT_TIMEOUT_SECONDS; +import static io.strimzi.kafka.oauth.common.Config.OAUTH_ENABLE_METRICS; +import static io.strimzi.kafka.oauth.common.Config.OAUTH_READ_TIMEOUT_SECONDS; +import static io.strimzi.kafka.oauth.common.Config.OAUTH_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM; +import static io.strimzi.kafka.oauth.common.Config.OAUTH_SSL_SECURE_RANDOM_IMPLEMENTATION; +import static io.strimzi.kafka.oauth.common.Config.OAUTH_SSL_TRUSTSTORE_CERTIFICATES; +import static io.strimzi.kafka.oauth.common.Config.OAUTH_SSL_TRUSTSTORE_LOCATION; +import static io.strimzi.kafka.oauth.common.Config.OAUTH_SSL_TRUSTSTORE_PASSWORD; +import static io.strimzi.kafka.oauth.common.Config.OAUTH_SSL_TRUSTSTORE_TYPE; +import static io.strimzi.kafka.oauth.common.Config.isTrue; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_CLIENT_ID; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_CONNECT_TIMEOUT_SECONDS; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_DELEGATE_TO_KAFKA_ACL; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_ENABLE_METRICS; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_GC_PERIOD_SECONDS; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_MAX_IDLE_TIME_SECONDS; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_POOL_SIZE; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_HTTP_RETRIES; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_KAFKA_CLUSTER_NAME; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_READ_TIMEOUT_SECONDS; 
+import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_REUSE_GRANTS; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_SSL_SECURE_RANDOM_IMPLEMENTATION; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_CERTIFICATES; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_LOCATION; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_PASSWORD; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_TYPE; +import static io.strimzi.kafka.oauth.server.authorizer.AuthzConfig.STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI; + +/** + * The classes used to parse and store Authorizer configuration. + * It is used to compare two configurations for equality and used by KeycloakAuthorizer to maintain a single instance of active Authorizer + * in the JVM even when multiple authorizers are instantiated from a single authorizer configuration. + */ +@SuppressWarnings({"deprecation"}) +public class Configuration { + + private static final Logger log = LoggerFactory.getLogger(Configuration.class); + private static final String PRINCIPAL_BUILDER_CLASS = OAuthKafkaPrincipalBuilder.class.getName(); + private static final String DEPRECATED_PRINCIPAL_BUILDER_CLASS = JwtKafkaPrincipalBuilder.class.getName(); + + private final Map configMap; + + private final List logs = new LinkedList<>(); + + + private final boolean reuseGrants; + private final String clientId; + private final String clusterName; + private final boolean delegateToKafkaACL; + private final int grantsRefreshPeriodSeconds; + private final int grantsMaxIdleTimeSeconds; + private final int grantsRefreshPoolSize; + private final int gcPeriodSeconds; + private boolean isKRaft; + private String truststore; + private String truststoreData; + private String truststorePassword; + private String truststoreType; + private String prng; + private String certificateHostCheckAlgorithm; + private List superUsers = Collections.emptyList(); + private int httpRetries; + private boolean enableMetrics; + private URI tokenEndpointUrl; + private int connectTimeoutSeconds; + private int readTimeoutSeconds; + + /** + * Create a new Configuration instance + *
<p>
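+     * For illustration (hypothetical values), a map satisfying the mandatory checks described below would
+     * contain at least:
+     * <pre>{@code
+     * Map<String, Object> configs = new HashMap<>();
+     * configs.put("principal.builder.class", "io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder");
+     * configs.put("strimzi.authorization.token.endpoint.uri", "https://sso.example.com/token");   // hypothetical endpoint
+     * configs.put("strimzi.authorization.client.id", "kafka");
+     * }</pre>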
+ * If some configuration is invalid in a way that it can't be automatically fixed, the {@link ConfigException} is thrown. + * + * @param configs Configuration map passed by Kafka broker to {@link org.apache.kafka.server.authorizer.Authorizer#configure} method + */ + Configuration(Map configs) { + + this.configMap = configs; + + AuthzConfig authzConfig = convertToAuthzConfig(configs); + + String pbclass = (String) configMap.get("principal.builder.class"); + if (!PRINCIPAL_BUILDER_CLASS.equals(pbclass) && !DEPRECATED_PRINCIPAL_BUILDER_CLASS.equals(pbclass)) { + throw new ConfigException("This authorizer requires " + PRINCIPAL_BUILDER_CLASS + " as 'principal.builder.class'"); + } + + if (DEPRECATED_PRINCIPAL_BUILDER_CLASS.equals(pbclass)) { + logs.add(new Log(Log.Level.WARNING, "The '" + DEPRECATED_PRINCIPAL_BUILDER_CLASS + "' class has been deprecated, and may be removed in the future. Please use '" + PRINCIPAL_BUILDER_CLASS + "' as 'principal.builder.class' instead.")); + } + + configureTokenEndpoint(authzConfig); + + clientId = ConfigUtil.getConfigWithFallbackLookup(authzConfig, STRIMZI_AUTHORIZATION_CLIENT_ID, ClientConfig.OAUTH_CLIENT_ID); + if (clientId == null) { + throw new ConfigException("OAuth client id ('" + STRIMZI_AUTHORIZATION_CLIENT_ID + "') not set."); + } + + configureSSLFactory(authzConfig); + configureHostnameVerifier(authzConfig); + configureHttpTimeouts(authzConfig); + + String clusterName = authzConfig.getValue(STRIMZI_AUTHORIZATION_KAFKA_CLUSTER_NAME); + if (clusterName == null) { + clusterName = "kafka-cluster"; + } + this.clusterName = clusterName; + + delegateToKafkaACL = authzConfig.getValueAsBoolean(STRIMZI_AUTHORIZATION_DELEGATE_TO_KAFKA_ACL, false); + + + configureSuperUsers(configs); + + // Number of threads that can perform token endpoint requests at the same time + grantsRefreshPoolSize = authzConfig.getValueAsInt(STRIMZI_AUTHORIZATION_GRANTS_REFRESH_POOL_SIZE, 5); + if (grantsRefreshPoolSize < 1) { + throw new ConfigException("Invalid value of '" + STRIMZI_AUTHORIZATION_GRANTS_REFRESH_POOL_SIZE + "': " + grantsRefreshPoolSize + ". Has to be >= 1."); + } + + // Less or equal zero means to never refresh + grantsRefreshPeriodSeconds = authzConfig.getValueAsInt(STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS, 60); + grantsMaxIdleTimeSeconds = configureGrantsMaxIdleTimeSeconds(authzConfig); + + gcPeriodSeconds = configureGcPeriodSeconds(authzConfig); + + reuseGrants = authzConfig.getValueAsBoolean(STRIMZI_AUTHORIZATION_REUSE_GRANTS, true); + + configureHttpRetries(authzConfig); + + configureMetrics(authzConfig); + } + + /** + * When a new instance of the Configuration is created some configuration options may generate warnings. + * Use this method to print those warning to the log. + *
<p>
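+     * Typical usage (an illustrative sketch):
+     * <pre>{@code
+     * Configuration configuration = new Configuration(configs);
+     * configuration.printLogs();   // emit any warnings collected while parsing the configuration
+     * }</pre>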
+ * This method decouples configuration creation from logging warnings. + */ + public void printLogs() { + for (Log line: logs) { + if (line.level == Log.Level.WARNING) { + log.warn(line.message); + } else { + log.debug(line.message); + } + } + } + + private int configureGrantsMaxIdleTimeSeconds(AuthzConfig config) { + int grantsMaxIdleTimeSeconds = config.getValueAsInt(STRIMZI_AUTHORIZATION_GRANTS_MAX_IDLE_TIME_SECONDS, 300); + if (grantsMaxIdleTimeSeconds <= 0) { + logs.add(new Log(Log.Level.WARNING, "'" + STRIMZI_AUTHORIZATION_GRANTS_MAX_IDLE_TIME_SECONDS + "' set to invalid value: " + grantsMaxIdleTimeSeconds + " (should be a positive number), using the default value: 300 seconds")); + grantsMaxIdleTimeSeconds = 300; + } + return grantsMaxIdleTimeSeconds; + } + + private int configureGcPeriodSeconds(AuthzConfig config) { + int gcPeriodSeconds = config.getValueAsInt(STRIMZI_AUTHORIZATION_GRANTS_GC_PERIOD_SECONDS, 300); + if (gcPeriodSeconds <= 0) { + logs.add(new Log(Log.Level.WARNING, "'" + STRIMZI_AUTHORIZATION_GRANTS_GC_PERIOD_SECONDS + "' set to invalid value: " + gcPeriodSeconds + ", using the default value: 300 seconds")); + gcPeriodSeconds = 300; + } + return gcPeriodSeconds; + } + + private void configureSuperUsers(Map configs) { + String users = (String) configs.get("super.users"); + if (users != null) { + superUsers = Arrays.stream(users.split(";")) + .map(UserSpec::of) + .collect(Collectors.toList()); + } + } + + private void configureSSLFactory(AuthzConfig config) { + truststore = ConfigUtil.getConfigWithFallbackLookup(config, + STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_LOCATION, OAUTH_SSL_TRUSTSTORE_LOCATION); + truststoreData = ConfigUtil.getConfigWithFallbackLookup(config, + STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_CERTIFICATES, OAUTH_SSL_TRUSTSTORE_CERTIFICATES); + truststorePassword = ConfigUtil.getConfigWithFallbackLookup(config, + STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_PASSWORD, OAUTH_SSL_TRUSTSTORE_PASSWORD); + truststoreType = ConfigUtil.getConfigWithFallbackLookup(config, + STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_TYPE, OAUTH_SSL_TRUSTSTORE_TYPE); + prng = ConfigUtil.getConfigWithFallbackLookup(config, + STRIMZI_AUTHORIZATION_SSL_SECURE_RANDOM_IMPLEMENTATION, OAUTH_SSL_SECURE_RANDOM_IMPLEMENTATION); + } + + private void configureHostnameVerifier(AuthzConfig config) { + String hostCheck = ConfigUtil.getConfigWithFallbackLookup(config, + STRIMZI_AUTHORIZATION_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM, OAUTH_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM); + + if (hostCheck == null) { + hostCheck = "HTTPS"; + } + this.certificateHostCheckAlgorithm = hostCheck; + } + private void configureHttpRetries(AuthzConfig config) { + httpRetries = config.getValueAsInt(STRIMZI_AUTHORIZATION_HTTP_RETRIES, 0); + if (httpRetries < 0) { + throw new ConfigException("Invalid value of '" + STRIMZI_AUTHORIZATION_HTTP_RETRIES + "': " + httpRetries + ". 
Has to be >= 0."); + } + } + private void configureMetrics(AuthzConfig config) { + + String enableMetricsString = ConfigUtil.getConfigWithFallbackLookup(config, STRIMZI_AUTHORIZATION_ENABLE_METRICS, OAUTH_ENABLE_METRICS); + try { + enableMetrics = enableMetricsString != null && isTrue(enableMetricsString); + } catch (Exception e) { + throw new ConfigException("Bad boolean value for key: " + STRIMZI_AUTHORIZATION_ENABLE_METRICS + ", value: " + enableMetricsString); + } + } + + private void configureTokenEndpoint(AuthzConfig config) { + String endpoint = ConfigUtil.getConfigWithFallbackLookup(config, STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI, + OAUTH_TOKEN_ENDPOINT_URI); + if (endpoint == null) { + throw new ConfigException("OAuth2 Token Endpoint ('" + STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI + "') not set."); + } + + try { + tokenEndpointUrl = new URI(endpoint); + } catch (URISyntaxException e) { + throw new ConfigException("Specified token endpoint uri is invalid: " + endpoint); + } + } + + private void configureHttpTimeouts(AuthzConfig config) { + List warnings = new LinkedList<>(); + connectTimeoutSeconds = ConfigUtil.getTimeoutConfigWithFallbackLookup(config, STRIMZI_AUTHORIZATION_CONNECT_TIMEOUT_SECONDS, OAUTH_CONNECT_TIMEOUT_SECONDS, warnings); + readTimeoutSeconds = ConfigUtil.getTimeoutConfigWithFallbackLookup(config, STRIMZI_AUTHORIZATION_READ_TIMEOUT_SECONDS, OAUTH_READ_TIMEOUT_SECONDS, warnings); + + for (String message: warnings) { + logs.add(new Log(Log.Level.WARNING, message)); + } + } + + /** + * This method extracts the key=value configuration entries relevant for KeycloakRBACAuthorizer from + * Kafka properties configuration file (server.properties) and wraps them with AuthzConfig instance. + *
<p>
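+     * For example (illustrative), recognised keys are copied over while unknown ones are simply not visible:
+     * <pre>{@code
+     * Map<String, Object> configs = new HashMap<>();
+     * configs.put("strimzi.authorization.client.id", "kafka");   // a recognised key - copied over
+     * configs.put("some.unrelated.key", "whatever");             // not in the list below - ignored
+     * AuthzConfig authzConfig = convertToCommonConfig(configs);
+     * }</pre>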
+ * Any new config options have to be added here in order to become visible, otherwise they will be ignored. + * + * @param configs Kafka configs map + * @return Config object + */ + static AuthzConfig convertToCommonConfig(Map configs) { + Properties p = new Properties(); + + // If you add a new config property, make sure to add it to this list + // otherwise it won't be picked + String[] keys = { + STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS, + STRIMZI_AUTHORIZATION_GRANTS_REFRESH_POOL_SIZE, + STRIMZI_AUTHORIZATION_GRANTS_MAX_IDLE_TIME_SECONDS, + STRIMZI_AUTHORIZATION_GRANTS_GC_PERIOD_SECONDS, + STRIMZI_AUTHORIZATION_HTTP_RETRIES, + STRIMZI_AUTHORIZATION_REUSE_GRANTS, + STRIMZI_AUTHORIZATION_DELEGATE_TO_KAFKA_ACL, + STRIMZI_AUTHORIZATION_KAFKA_CLUSTER_NAME, + STRIMZI_AUTHORIZATION_CLIENT_ID, + Config.OAUTH_CLIENT_ID, + STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI, + OAUTH_TOKEN_ENDPOINT_URI, + STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_LOCATION, + OAUTH_SSL_TRUSTSTORE_LOCATION, + STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_CERTIFICATES, + OAUTH_SSL_TRUSTSTORE_CERTIFICATES, + STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_PASSWORD, + OAUTH_SSL_TRUSTSTORE_PASSWORD, + STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_TYPE, + OAUTH_SSL_TRUSTSTORE_TYPE, + STRIMZI_AUTHORIZATION_SSL_SECURE_RANDOM_IMPLEMENTATION, + OAUTH_SSL_SECURE_RANDOM_IMPLEMENTATION, + STRIMZI_AUTHORIZATION_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM, + OAUTH_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM, + STRIMZI_AUTHORIZATION_CONNECT_TIMEOUT_SECONDS, + OAUTH_CONNECT_TIMEOUT_SECONDS, + STRIMZI_AUTHORIZATION_READ_TIMEOUT_SECONDS, + OAUTH_READ_TIMEOUT_SECONDS, + STRIMZI_AUTHORIZATION_ENABLE_METRICS, + OAUTH_ENABLE_METRICS + }; + + // Copy over the keys + for (String key: keys) { + ConfigUtil.putIfNotNull(p, key, configs.get(key)); + } + + return new AuthzConfig(p); + } + + AuthzConfig convertToAuthzConfig(Map configs) { + AuthzConfig config = Configuration.convertToCommonConfig(configs); + isKRaft = detectKRaft(configs); + if (isKRaft) { + logs.add(new Log(Log.Level.DEBUG, "Detected KRaft mode ('process.roles' configured)")); + } + return config; + } + + private boolean detectKRaft(Map configs) { + // auto-detect KRaft mode + Object prop = configs.get("process.roles"); + String processRoles = prop != null ? 
String.valueOf(prop) : null; + return processRoles != null && processRoles.length() > 0; + } + + boolean isKRaft() { + return isKRaft; + } + + String getTruststore() { + return truststore; + } + + String getTruststoreData() { + return truststoreData; + } + + String getTruststorePassword() { + return truststorePassword; + } + + String getTruststoreType() { + return truststoreType; + } + + String getPrng() { + return prng; + } + + String getCertificateHostCheckAlgorithm() { + return certificateHostCheckAlgorithm; + } + + boolean isDelegateToKafkaACL() { + return delegateToKafkaACL; + } + + URI getTokenEndpointUrl() { + return tokenEndpointUrl; + } + + String getClientId() { + return clientId; + } + + boolean isReuseGrants() { + return reuseGrants; + } + + String getClusterName() { + return clusterName; + } + + int getGrantsRefreshPeriodSeconds() { + return grantsRefreshPeriodSeconds; + } + + int getGrantsMaxIdleTimeSeconds() { + return grantsMaxIdleTimeSeconds; + } + + int getGrantsRefreshPoolSize() { + return grantsRefreshPoolSize; + } + + int getGcPeriodSeconds() { + return gcPeriodSeconds; + } + + List getSuperUsers() { + return superUsers; + } + + int getHttpRetries() { + return httpRetries; + } + + boolean isEnableMetrics() { + return enableMetrics; + } + + int getConnectTimeoutSeconds() { + return connectTimeoutSeconds; + } + + int getReadTimeoutSeconds() { + return readTimeoutSeconds; + } + + Map getConfigMap() { + return configMap; + } + + private static class Log { + Level level; + String message; + + Log(Level level, String message) { + if (level == null) { + throw new IllegalArgumentException("level is null"); + } + this.level = level; + this.message = message; + } + + enum Level { + WARNING, + DEBUG + } + } + + @SuppressWarnings({"checkstyle:CyclomaticComplexity"}) + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Configuration that = (Configuration) o; + return reuseGrants == that.reuseGrants && + delegateToKafkaACL == that.delegateToKafkaACL && + grantsRefreshPeriodSeconds == that.grantsRefreshPeriodSeconds && + grantsMaxIdleTimeSeconds == that.grantsMaxIdleTimeSeconds && + grantsRefreshPoolSize == that.grantsRefreshPoolSize && + gcPeriodSeconds == that.gcPeriodSeconds && + isKRaft == that.isKRaft && + httpRetries == that.httpRetries && + enableMetrics == that.enableMetrics && + connectTimeoutSeconds == that.connectTimeoutSeconds && + readTimeoutSeconds == that.readTimeoutSeconds && + Objects.equals(clientId, that.clientId) && + Objects.equals(clusterName, that.clusterName) && + Objects.equals(truststore, that.truststore) && + Objects.equals(truststoreData, that.truststoreData) && + Objects.equals(truststorePassword, that.truststorePassword) && + Objects.equals(truststoreType, that.truststoreType) && + Objects.equals(prng, that.prng) && + Objects.equals(certificateHostCheckAlgorithm, that.certificateHostCheckAlgorithm) && + Objects.equals(superUsers, that.superUsers) && + Objects.equals(tokenEndpointUrl, that.tokenEndpointUrl); + } + + @Override + public int hashCode() { + return Objects.hash(reuseGrants, + clientId, + clusterName, + delegateToKafkaACL, + grantsRefreshPeriodSeconds, + grantsMaxIdleTimeSeconds, + grantsRefreshPoolSize, + gcPeriodSeconds, + isKRaft, + truststore, + truststoreData, + truststorePassword, + truststoreType, + prng, + certificateHostCheckAlgorithm, + superUsers, + httpRetries, + enableMetrics, + tokenEndpointUrl, + connectTimeoutSeconds, + readTimeoutSeconds); + 
} +} diff --git a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/GrantsHandler.java b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/GrantsHandler.java new file mode 100644 index 00000000..bc395f8f --- /dev/null +++ b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/GrantsHandler.java @@ -0,0 +1,579 @@ +/* + * Copyright 2017-2023, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.kafka.oauth.server.authorizer; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import io.strimzi.kafka.oauth.common.BearerTokenWithPayload; +import io.strimzi.kafka.oauth.common.HttpException; +import io.strimzi.kafka.oauth.common.JSONUtil; +import io.strimzi.kafka.oauth.services.ServiceException; +import io.strimzi.kafka.oauth.services.Services; +import io.strimzi.kafka.oauth.validator.DaemonThreadFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Function; + +import static io.strimzi.kafka.oauth.common.JSONUtil.asSetOfNodes; +import static io.strimzi.kafka.oauth.common.LogUtil.mask; + +/** + * The class that handles grants cache and services to maintain it. + *
<p>
+ * Grants are cached per user id. All sessions by the same user id share the cached grants.
+ * The underlying authentication for fetching the grants is still an access token. Different sessions
+ * with the same user id can authenticate using different access tokens. The assumption is that grants resolution
+ * for the user id produces the same set of grants regardless of the access token used to obtain grants.
+ * Access token for a specific user id with the longest expiry is held in the cache for a background grants refresh job.
+ * <p>
+ * The instance of this class runs three background services:
+ * <ul>
+ * <li><em>Grants Refresh Scheduler</em> A ScheduledExecutorService that wakes up on a fixed period and scans the grants cache,
+ * queueing a refresh for every grant that has a valid access token, and hasn't been idle for more than the max idle time.
+ * Grants with expired access tokens and idled out grants are removed from cache.</li>
+ * <li><em>Grants Refresh Worker</em> A fixed thread pool ExecutorService that executes refresh jobs queued by Grants Refresh Scheduler</li>
+ * <li><em>GC Worker</em> A ScheduledExecutorService that wakes up on a fixed period and removes grants for user ids for which there are no active sessions.
+ * It removes such grants from cache, so they are no longer refreshed.</li>
+ * </ul>
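+ * For illustration, each scheduled service follows this wiring pattern (simplified from the constructor further below):
+ * <pre>{@code
+ * ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(daemonThreadFactory);
+ * scheduler.scheduleAtFixedRate(this::performRefreshGrantsRun, periodSeconds, periodSeconds, TimeUnit.SECONDS);
+ * }</pre>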
+ * + * When a new session triggers a first authorize() call, the grants are looked up in the cache. If grants are available they are returned and used. + * If not, they are fetched. If during that time any authorize() call comes in for the same user id it is made to wait for the results of the existing grants fetching job to be complete. + * This way we prevent multiple fetches of grants for the same user id when they are not yet in the cache. When grant is successfully retrieved it is cached, + * and returned to all the waiting authorize() threads. If fetching of grants is unsuccessful they all receive an exception, and all the authorize() actions are denied. + * In that case, since there is no grant cached for user id, the next authorize() request will attempt to fetch grants for that user id again using the current session's access token. + */ +@SuppressFBWarnings("THROWS_METHOD_THROWS_CLAUSE_BASIC_EXCEPTION") +class GrantsHandler implements Closeable { + + private static final Logger log = LoggerFactory.getLogger(GrantsHandler.class); + + private final HashMap grantsCache = new HashMap<>(); + + /** Helper methods to queue up grants refresh jobs per user id */ + private final Semaphores semaphores = new Semaphores<>(); + + /** Grants Refresh Worker */ + private final ExecutorService refreshWorker; + + /** GC Worker */ + private final ScheduledExecutorService gcWorker; + + /** Grants Refresh Scheduler */ + private final ScheduledExecutorService refreshScheduler; + + private final long gcPeriodMillis; + + /** Externally provided function that performs an HTTP request to the Keycloak token / grants endpoint */ + private final Function authorizationGrantsProvider; + + /** Maximum number of retries to attempt if the grants refresh fails */ + private final int httpRetries; + + /** Maximum idle time in millis for a cached grant (time in which a grant has not been accessed in cache) */ + private final long grantsMaxIdleMillis; + + /** Used to check when the last gc run was performed in order to prevent gc runs from queueing up */ + private long lastGcRunTimeMillis; + + /** + * Cleanup background threads. + */ + @Override + public void close() { + shutDownExecutorService("grants refresh scheduler", refreshScheduler); + shutDownExecutorService("grants refresh worker", refreshWorker); + shutDownExecutorService("gc worker", gcWorker); + } + + private void shutDownExecutorService(String name, ExecutorService service) { + try { + log.trace("Shutting down {} [{}]", name, service); + service.shutdownNow(); + if (!service.awaitTermination(10, TimeUnit.SECONDS)) { + log.debug("[IGNORED] Failed to cleanly shutdown {} within 10 seconds", name); + } + } catch (Throwable t) { + log.warn("[IGNORED] Failed to cleanly shutdown {}: ", name, t); + } + } + + /** + * A grants record in grantsCache map containing a grants JSON, access token used to retrieve grants, token expiry info and last usage info. + * Multiple threads can access the instance of this class for read and write. 
+ */ + static class Info { + private volatile String accessToken; + private volatile JsonNode grants; + private volatile long expiresAt; + private volatile long lastUsed; + + Info(String accessToken, long expiresAt) { + this.accessToken = accessToken; + this.expiresAt = expiresAt; + this.lastUsed = System.currentTimeMillis(); + } + + /** + * This method is called once per authorize() call to update the fact that grants have been accessed, + * and to update the access token and the associated info if it is found to be longer lived that the existing + * access token. + * + * @param token An object holding the access token and associated info + */ + synchronized void updateTokenIfExpiresLater(BearerTokenWithPayload token) { + lastUsed = System.currentTimeMillis(); + if (token.lifetimeMs() > expiresAt) { + accessToken = token.value(); + expiresAt = token.lifetimeMs(); + } + } + + String getAccessToken() { + return accessToken; + } + + JsonNode getGrants() { + return grants; + } + + void setGrants(JsonNode newGrants) { + grants = newGrants; + } + + long getLastUsed() { + return lastUsed; + } + + boolean isExpiredAt(long timestamp) { + return expiresAt < timestamp; + } + } + + /** + * A Future that provides a result of the scheduled grants refresh job. + * It wraps the Future instance returned by a call to refreshWorker.submit() + */ + static class Future implements java.util.concurrent.Future { + + private final java.util.concurrent.Future delegate; + private final String userId; + private final Info grantsInfo; + + /** + * Create a new instance + * + * @param userId User id that server as a key in grantsCache map + * @param grantsInfo Cached grants record in grantsCache map + * @param future Original future instance to wrap + */ + @SuppressFBWarnings("EI_EXPOSE_REP2") + public Future(String userId, GrantsHandler.Info grantsInfo, java.util.concurrent.Future future) { + this.userId = userId; + this.grantsInfo = grantsInfo; + this.delegate = future; + } + + /** + * Get a BearerTokenWithPayload object representing a session + * + * @return A token instance + */ + @SuppressFBWarnings("EI_EXPOSE_REP") + public GrantsHandler.Info getGrantsInfo() { + return grantsInfo; + } + + public String getUserId() { + return userId; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return delegate.cancel(mayInterruptIfRunning); + } + + @Override + public boolean isCancelled() { + return delegate.isCancelled(); + } + + @Override + public boolean isDone() { + return delegate.isDone(); + } + + @Override + public JsonNode get() throws InterruptedException, ExecutionException { + return delegate.get(); + } + + @Override + public JsonNode get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + return delegate.get(timeout, unit); + } + } + + /** + * Create a new GrantsHandler instance + * + * @param grantsRefreshPeriodSeconds Number of seconds between two consecutive grants refresh job runs + * @param grantsRefreshPoolSize The number of threads over which to spread a grants refresh job run + * @param grantsMaxIdleTimeSeconds An idle time in seconds during which the cached grant wasn't accesses by any session so is deemed unneeded, and can be garbage collected + * @param httpGrantsProvider A function with grant fetching logic + * @param httpRetries A maximum number of repeated attempts if a grants request to the token endpoint fails in unexpected way + * @param gcPeriodSeconds Number of seconds between two consecutive grants garbage collection job 
+
+    /**
+     * Create a new GrantsHandler instance
+     *
+     * @param grantsRefreshPeriodSeconds Number of seconds between two consecutive grants refresh job runs
+     * @param grantsRefreshPoolSize The number of threads over which to spread a grants refresh job run
+     * @param grantsMaxIdleTimeSeconds An idle time in seconds during which the cached grant wasn't accessed by any session, so it is deemed unneeded and can be garbage collected
+     * @param httpGrantsProvider A function with grant fetching logic
+     * @param httpRetries The maximum number of repeated attempts if a grants request to the token endpoint fails in an unexpected way
+     * @param gcPeriodSeconds Number of seconds between two consecutive grants garbage collection job runs
+     */
+    @SuppressFBWarnings("MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR")
+    GrantsHandler(int grantsRefreshPeriodSeconds, int grantsRefreshPoolSize, int grantsMaxIdleTimeSeconds, Function<String, JsonNode> httpGrantsProvider, int httpRetries, int gcPeriodSeconds) {
+        this.authorizationGrantsProvider = httpGrantsProvider;
+        this.httpRetries = httpRetries;
+
+        if (grantsMaxIdleTimeSeconds <= 0) {
+            throw new IllegalArgumentException("grantsMaxIdleTimeSeconds <= 0");
+        }
+        this.grantsMaxIdleMillis = grantsMaxIdleTimeSeconds * 1000L;
+
+        DaemonThreadFactory daemonThreadFactory = new DaemonThreadFactory();
+        if (grantsRefreshPeriodSeconds > 0) {
+            this.refreshWorker = Executors.newFixedThreadPool(grantsRefreshPoolSize, daemonThreadFactory);
+
+            // Set up periodic timer to fetch grants for active sessions every refresh seconds
+            this.refreshScheduler = Executors.newSingleThreadScheduledExecutor(daemonThreadFactory);
+            refreshScheduler.scheduleAtFixedRate(this::performRefreshGrantsRun, grantsRefreshPeriodSeconds, grantsRefreshPeriodSeconds, TimeUnit.SECONDS);
+        } else {
+            this.refreshWorker = null;
+            this.refreshScheduler = null;
+        }
+
+        if (gcPeriodSeconds <= 0) {
+            throw new IllegalArgumentException("gcPeriodSeconds <= 0");
+        }
+        this.gcPeriodMillis = gcPeriodSeconds * 1000L;
+        this.gcWorker = Executors.newSingleThreadScheduledExecutor(daemonThreadFactory);
+        gcWorker.scheduleAtFixedRate(this::gcGrantsCacheRunnable, gcPeriodSeconds, gcPeriodSeconds, TimeUnit.SECONDS);
+    }
+
+    /**
+     * The function to call as a periodic job
+     */
+    private void gcGrantsCacheRunnable() {
+        long timePassedSinceGc = System.currentTimeMillis() - lastGcRunTimeMillis;
+        if (timePassedSinceGc < gcPeriodMillis - 1000) { // give or take one second
+            log.debug("Skipped queued gc run (last run {} ms ago)", timePassedSinceGc);
+            return;
+        }
+        lastGcRunTimeMillis = System.currentTimeMillis();
+        gcGrantsCache();
+    }
+
+    /**
+     * Perform one garbage collection run
+     */
+    private void gcGrantsCache() {
+        long start = System.currentTimeMillis();
+        HashSet<String> userIds = new HashSet<>(Services.getInstance().getSessions().map(BearerTokenWithPayload::principalName));
+        log.trace("Grants gc: active users: {}", userIds);
+        int beforeSize;
+        int afterSize;
+        synchronized (grantsCache) {
+            beforeSize = grantsCache.size();
+            // keep the active sessions, remove grants for unknown user ids
+            grantsCache.keySet().retainAll(userIds);
+            afterSize = grantsCache.size();
+        }
+        log.debug("Grants gc: active users count: {}, grantsCache size before: {}, grantsCache size after: {}, gc duration: {} ms", userIds.size(), beforeSize, afterSize, System.currentTimeMillis() - start);
+    }
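The garbage collection above reduces to a set intersection: drop every cached user id that no longer has an active session. A minimal, self-contained sketch of that core move, using plain JDK types and hypothetical names rather than the patch's classes:

    import java.util.Map;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class GrantsGcSketch {
        public static void main(String[] args) {
            Map<String, String> grantsByUser = new ConcurrentHashMap<>();
            grantsByUser.put("alice", "[]");
            grantsByUser.put("bob", "[]");

            // Principals that still have at least one open session
            Set<String> activeUsers = Set.of("alice");

            // The same core move as gcGrantsCache(): retain only the active users
            grantsByUser.keySet().retainAll(activeUsers);

            System.out.println(grantsByUser.keySet()); // prints [alice]
        }
    }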
+
+    /**
+     * Fetch grants for the user using the access token contained in the grantsInfo,
+     * and save the result to the grantsInfo object.
+     *
+     * @param userId User id
+     * @param grantsInfo Grants info object representing the cached grants entry for the user
+     * @return Obtained grants
+     */
+    private JsonNode fetchAndSaveGrants(String userId, Info grantsInfo) {
+        // If no grants found, fetch grants from server
+        JsonNode grants = null;
+        try {
+            log.debug("Fetching grants from Keycloak for user {}", userId);
+            grants = fetchGrantsWithRetry(grantsInfo.getAccessToken());
+            if (grants == null) {
+                log.debug("Received null grants for user: {}, token: {}", userId, mask(grantsInfo.getAccessToken()));
+                grants = JSONUtil.newObjectNode();
+            }
+        } catch (HttpException e) {
+            if (e.getStatus() == 403) {
+                grants = JSONUtil.newObjectNode();
+            } else {
+                log.warn("Unexpected status while fetching authorization data - will retry next time: {}", e.getMessage());
+            }
+        }
+        if (grants != null) {
+            // Store authz grants in the token, so they are available for subsequent requests
+            log.debug("Saving non-null grants for user: {}, token: {}", userId, mask(grantsInfo.getAccessToken()));
+            grantsInfo.setGrants(grants);
+        }
+        return grants;
+    }
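The status-code policy used by fetchAndSaveGrants() above and fetchGrantsWithRetry() below (401 and 403 are definitive answers, anything else may be retried up to a configured limit) looks like this in isolation. HttpStatusException is a hypothetical stand-in for the project's HttpException:

    import java.util.function.Function;

    public class RetryPolicySketch {

        static class HttpStatusException extends RuntimeException {
            final int status;
            HttpStatusException(int status) { this.status = status; }
        }

        static String fetchWithRetry(Function<String, String> fetcher, String token, int retries) {
            int attempt = 0;
            while (true) {
                attempt++;
                try {
                    return fetcher.apply(token);
                } catch (HttpStatusException e) {
                    // 401 (invalid token) and 403 (no permissions) are definitive - no retry
                    if (e.status == 401 || e.status == 403) {
                        throw e;
                    }
                    if (attempt > retries) {
                        throw e;
                    }
                    // otherwise retry immediately, as the authorizer does
                }
            }
        }

        public static void main(String[] args) {
            int[] calls = {0};
            String grants = fetchWithRetry(t -> {
                if (++calls[0] < 3) throw new HttpStatusException(500); // transient failures
                return "[]";
            }, "token", 2);
            System.out.println(grants + " after " + calls[0] + " calls"); // [] after 3 calls
        }
    }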
+
+    /**
+     * Method that performs the POST request to fetch grants for the token.
+     * In case of a connection failure or a non-200 status response this method immediately retries the request if so configured.
+     * <p>
+     * Status 401 does not trigger a retry since it is used to signal an invalid token.
+     * Status 403 does not trigger a retry either since it signals no permissions.
+     *
+     * @param token The raw access token
+     * @return Grants JSON response
+     */
+    private JsonNode fetchGrantsWithRetry(String token) {
+
+        int i = 0;
+        do {
+            i += 1;
+
+            try {
+                if (i > 1) {
+                    log.debug("Grants request attempt no. {}", i);
+                }
+                return authorizationGrantsProvider.apply(token);
+
+            } catch (Exception e) {
+                if (e instanceof HttpException) {
+                    int status = ((HttpException) e).getStatus();
+                    if (403 == status || 401 == status) {
+                        throw e;
+                    }
+                }
+
+                if (log.isInfoEnabled()) {
+                    log.info("Failed to fetch grants on try no. {}", i, e);
+                }
+                if (i > httpRetries) {
+                    log.debug("Failed to fetch grants after {} tries", i);
+                    throw e;
+                }
+            }
+        } while (true);
+    }
+
+    /**
+     * Perform a single grants refresh run
+     */
+    private void performRefreshGrantsRun() {
+        try {
+            log.debug("Refreshing authorization grants ... [{}]", this);
+
+            HashMap<String, Info> workmap;
+            synchronized (grantsCache) {
+                workmap = new HashMap<>(grantsCache);
+            }
+
+            Set<Map.Entry<String, Info>> entries = workmap.entrySet();
+            List<Future> scheduled = new ArrayList<>(entries.size());
+            long now = System.currentTimeMillis();
+
+            for (Map.Entry<String, Info> ent : entries) {
+                String userId = ent.getKey();
+                Info grantsInfo = ent.getValue();
+                if (grantsInfo.getLastUsed() < now - grantsMaxIdleMillis) {
+                    log.debug("Skipping refreshing grants for user '{}' due to max idle time.", userId);
+                    removeUserFromCacheIfExpiredOrIdle(userId);
+                    continue;
+                }
+                scheduled.add(new Future(userId, grantsInfo, refreshWorker.submit(() -> {
+
+                    if (log.isTraceEnabled()) {
+                        log.trace("Fetch grants for user: {}, token: {}", userId, mask(grantsInfo.getAccessToken()));
+                    }
+
+                    JsonNode newGrants;
+                    try {
+                        newGrants = fetchGrantsWithRetry(grantsInfo.getAccessToken());
+                    } catch (HttpException e) {
+                        // Handle Keycloak token / grants endpoint returning status 403 Forbidden
+                        // 403 happens when no policy matches the token - thus there are no grants, no permission granted
+                        if (403 == e.getStatus()) {
+                            newGrants = JSONUtil.newObjectNode();
+                        } else {
+                            throw e;
+                        }
+                    }
+                    JsonNode oldGrants = grantsInfo.getGrants();
+                    if (!semanticGrantsEquals(newGrants, oldGrants)) {
+                        if (log.isDebugEnabled()) {
+                            log.debug("Grants have changed for user: {}; before: {}; after: {}", userId, oldGrants, newGrants);
+                        }
+                        grantsInfo.setGrants(newGrants);
+                    }
+
+                    // Only added here to allow compiler to resolve the lambda as a Callable
+                    return newGrants;
+                })));
+            }
+
+            for (GrantsHandler.Future f : scheduled) {
+                try {
+                    f.get();
+                } catch (ExecutionException e) {
+                    final Throwable cause = e.getCause();
+                    if (cause instanceof HttpException) {
+                        log.debug("[IGNORED] Failed to fetch grants for user: {}", cause.getMessage());
+                        // Handle Keycloak token / grants endpoint returning status 401 Unauthorized
+                        // 401 happens when the token has expired or has been revoked
+                        if (401 == ((HttpException) cause).getStatus()) {
+                            grantsCache.remove(f.getUserId());
+                            log.debug("Removed user from grants cache: {}", f.getUserId());
+                            Services.getInstance().getSessions().removeAllWithMatchingAccessToken(f.getGrantsInfo().accessToken);
+                            continue;
+                        }
+                    }
+
+                    log.warn("[IGNORED] Failed to fetch grants for user: {}", e.getMessage(), e);
+
+                } catch (Throwable e) {
+                    if (log.isWarnEnabled()) {
+                        log.warn("[IGNORED] Failed to fetch grants for user: {}, token: {} - {}", f.getUserId(), mask(f.getGrantsInfo().accessToken), e.getMessage(), e);
+                    }
+                }
+            }
+
+        } catch (Throwable t) {
+            // Log, but don't rethrow the exception, to prevent the scheduler from cancelling the scheduled job.
+            log.error("{}", t.getMessage(), t);
+        } finally {
+            log.debug("Done refreshing grants");
+        }
+    }
+
+    /**
+     * Remove grants for the given user from the cache if the access token for it is expired or there was no access for the
+     * maximum idle time.
+     *
+     * @param userId User id
+     */
+    private void removeUserFromCacheIfExpiredOrIdle(String userId) {
+        synchronized (grantsCache) {
+            Info info = grantsCache.get(userId);
+            if (info != null) {
+                long now = System.currentTimeMillis();
+                boolean isIdle = info.getLastUsed() < now - grantsMaxIdleMillis;
+                if (isIdle || info.isExpiredAt(now)) {
+                    log.debug("Removed user from grants cache due to {}: {}", isIdle ? "'idle'" : "'expired'", userId);
+                    grantsCache.remove(userId);
+                }
+            }
+        }
+    }
+
+    /**
+     * Look up the grants cache entry for the given token
+     *
+     * @param token A token object
+     * @return Grants info object representing a grants cache entry
+     */
+    Info getGrantsInfoFromCache(BearerTokenWithPayload token) {
+        Info grantsInfo;
+
+        synchronized (grantsCache) {
+            grantsInfo = grantsCache.computeIfAbsent(token.principalName(),
+                    k -> new Info(token.value(), token.lifetimeMs()));
+        }
+
+        // Always keep the longest lasting access token in the cache
+        grantsInfo.updateTokenIfExpiresLater(token);
+        return grantsInfo;
+    }
+
+    /**
+     * This method ensures that for any particular user id there is a single grants fetching operation in progress at any one time.
+     * <p>
+     * If for the current user there is a grants fetch operation in progress, the thread simply waits for the result of that operation.
+     * This is only relevant if there are no grants for the user available in the grants cache.
+     *
+     * @param userId User id
+     * @param grantsInfo Grants info object representing the cached grants entry for the user
+     * @return Grants JSON
+     */
+    JsonNode fetchGrantsForUserOrWaitForDelivery(String userId, Info grantsInfo) {
+        // Fetch authorization grants
+        Semaphores.SemaphoreResult<JsonNode> semaphore = semaphores.acquireSemaphore(userId);
+
+        // Try to acquire semaphore for fetching grants
+        if (semaphore.acquired()) {
+            // If acquired
+            try {
+                log.debug("Acquired semaphore for '{}'", userId);
+                JsonNode grants = fetchAndSaveGrants(userId, grantsInfo);
+                semaphore.future().complete(grants);
+                return grants;
+
+            } catch (Throwable t) {
+                semaphore.future().completeExceptionally(t);
+                throw t;
+            } finally {
+                semaphores.releaseSemaphore(userId);
+                log.debug("Released semaphore for '{}'", userId);
+            }
+
+        } else {
+            try {
+                log.debug("Waiting on another thread to get grants for '{}'", userId);
+                return semaphore.future().get();
+            } catch (ExecutionException e) {
+                Throwable cause = e.getCause();
+                if (cause instanceof ServiceException) {
+                    throw (ServiceException) cause;
+                } else {
+                    throw new ServiceException("ExecutionException waiting for grants result: ", e);
+                }
+            } catch (InterruptedException e) {
+                throw new ServiceException("InterruptedException waiting for grants result: ", e);
+            }
+        }
+    }
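The semaphore dance above is a single-flight pattern: one thread performs the fetch, and concurrent callers for the same user wait on the winner's future. A minimal sketch of the same idea using only JDK types (all names are hypothetical):

    import java.util.Map;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Supplier;

    public class SingleFlightSketch {
        private final Map<String, CompletableFuture<String>> inFlight = new ConcurrentHashMap<>();

        // The first caller for a key performs the fetch; concurrent callers
        // for the same key wait on the winner's future instead of fetching again
        String fetchOnce(String key, Supplier<String> fetcher) {
            CompletableFuture<String> mine = new CompletableFuture<>();
            CompletableFuture<String> winner = inFlight.putIfAbsent(key, mine);
            if (winner == null) {
                try {
                    String result = fetcher.get();
                    mine.complete(result);
                    return result;
                } catch (Throwable t) {
                    mine.completeExceptionally(t);
                    throw t;
                } finally {
                    inFlight.remove(key);
                }
            }
            return winner.join(); // wait for the thread that won the race
        }

        public static void main(String[] args) {
            SingleFlightSketch s = new SingleFlightSketch();
            System.out.println(s.fetchOnce("alice", () -> "grants-for-alice"));
        }
    }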
+
+    /**
+     * This method compares two JSON objects with grants for semantic equality.
+     * <p>
+     * Keycloak sometimes returns grants for the user in a different order, treating the JSON array as a Set.
+     * When checking for equality we should also treat the JSON array as a Set.
+     *
+     * @param grants1 First JSON array containing grants
+     * @param grants2 Second JSON array containing grants
+     * @return true if grants objects are semantically equal
+     */
+    private static boolean semanticGrantsEquals(JsonNode grants1, JsonNode grants2) {
+        if (grants1 == grants2) return true;
+        if (grants1 == null) {
+            throw new IllegalArgumentException("Invalid grants: null");
+        }
+        if (grants2 == null) {
+            return false;
+        }
+        if (!grants1.isArray()) {
+            throw new IllegalArgumentException("Invalid grants: not a JSON array");
+        }
+        if (!grants2.isArray()) {
+            throw new IllegalArgumentException("Invalid grants: not a JSON array");
+        }
+        return asSetOfNodes((ArrayNode) grants1).equals(asSetOfNodes((ArrayNode) grants2));
+    }
+}
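To see why an order-insensitive comparison is needed, here is a small self-contained Jackson example; it assumes jackson-databind on the classpath and uses a plain HashSet comparison in place of the asSetOfNodes() helper:

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.HashSet;
    import java.util.Set;

    public class GrantsEqualsSketch {
        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper();
            JsonNode a = mapper.readTree("[{\"rsname\":\"Topic:a\"},{\"rsname\":\"Topic:b\"}]");
            JsonNode b = mapper.readTree("[{\"rsname\":\"Topic:b\"},{\"rsname\":\"Topic:a\"}]");

            // Plain JsonNode equality is order-sensitive for arrays ...
            System.out.println(a.equals(b)); // false

            // ... but comparing the elements as a Set ignores ordering,
            // which is the behaviour semanticGrantsEquals() needs
            Set<JsonNode> setA = new HashSet<>();
            a.forEach(setA::add);
            Set<JsonNode> setB = new HashSet<>();
            b.forEach(setB::add);
            System.out.println(setA.equals(setB)); // true
        }
    }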
diff --git a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/JwtKafkaPrincipalBuilder.java b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/JwtKafkaPrincipalBuilder.java
index 19105236..0d5005d9 100644
--- a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/JwtKafkaPrincipalBuilder.java
+++ b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/JwtKafkaPrincipalBuilder.java
@@ -7,17 +7,7 @@ import io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder;
 
 /**
- * This class needs to be enabled as the PrincipalBuilder on Kafka Broker.
- * <p>
- * It ensures that the OAuthBearerToken produced by io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler - * is available to KeycloakRBACAuthorizer. - *

- *

- * You can use 'principal.builder.class=io.strimzi.kafka.oauth.server.authorizer.JwtKafkaPrincipalBuilder' - * property definition in server.properties to install it. - *

- * - * @deprecated Use io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder class instead. + * @deprecated Use {@link io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder} class instead. */ @Deprecated public class JwtKafkaPrincipalBuilder extends OAuthKafkaPrincipalBuilder { diff --git a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakAuthorizer.java b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakAuthorizer.java new file mode 100644 index 00000000..3572fd2e --- /dev/null +++ b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakAuthorizer.java @@ -0,0 +1,247 @@ +/* + * Copyright 2017-2023, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.kafka.oauth.server.authorizer; + +import io.strimzi.kafka.oauth.common.ConfigException; +import org.apache.kafka.common.Endpoint; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.acl.AclBinding; +import org.apache.kafka.common.acl.AclBindingFilter; +import org.apache.kafka.common.acl.AclOperation; +import org.apache.kafka.common.resource.ResourceType; +import org.apache.kafka.metadata.authorizer.AclMutator; +import org.apache.kafka.metadata.authorizer.ClusterMetadataAuthorizer; +import org.apache.kafka.metadata.authorizer.StandardAcl; +import org.apache.kafka.metadata.authorizer.StandardAuthorizer; +import org.apache.kafka.server.authorizer.AclCreateResult; +import org.apache.kafka.server.authorizer.AclDeleteResult; +import org.apache.kafka.server.authorizer.Action; +import org.apache.kafka.server.authorizer.AuthorizableRequestContext; +import org.apache.kafka.server.authorizer.AuthorizationResult; +import org.apache.kafka.server.authorizer.AuthorizerServerInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.atomic.AtomicInteger; + + +/** + * An authorizer using Keycloak Authorization Services that supports KRaft mode and Zookeeper mode. + *

+ * In KRaft mode multiple instances of this class can be instantiated, and each needs its own instance of StandardAuthorizer
+ * for delegating authorization to the Kafka ACL implementation.
+ *

+ * This authorizer auto-detects whether it runs in KRaft mode or Zookeeper mode, and automatically sets up the appropriate Kafka ACL delegation classes.
+ * All authorization logic is delegated to KeycloakRBACAuthorizer, of which a single instance is created and shared between all
+ * instances of this class.
+ *

+ * To install this authorizer in Kafka, specify the following in your 'server.properties': + *

+ * <pre>
+ *     authorizer.class.name=io.strimzi.kafka.oauth.server.authorizer.KeycloakAuthorizer
+ *     principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder
+ * </pre>
+ *

+ * Configuration options are the same as for {@link KeycloakRBACAuthorizer}.
+ */
+@SuppressWarnings("deprecation")
+public class KeycloakAuthorizer implements ClusterMetadataAuthorizer {
+
+    private static final Logger log = LoggerFactory.getLogger(KeycloakAuthorizer.class);
+
+    /**
+     * A counter used to generate an instance number for each instance of this class
+     */
+    private static final AtomicInteger INSTANCE_NUMBER_COUNTER = new AtomicInteger(1);
+
+    /**
+     * An instance number used in {@link #toString()} method, to easily track the number of instances of this class
+     */
+    private final int instanceNumber = INSTANCE_NUMBER_COUNTER.getAndIncrement();
+
+    private StandardAuthorizer delegate;
+    private KeycloakRBACAuthorizer singleton;
+
+    @Override
+    public void configure(Map<String, ?> configs) {
+        Configuration configuration = new Configuration(configs);
+
+        // There is one singleton to which authorize() calls are delegated
+        singleton = KeycloakAuthorizerService.getInstance();
+        if (singleton == null) {
+            singleton = new KeycloakRBACAuthorizer(this);
+            singleton.configure(configs);
+            KeycloakAuthorizerService.setInstance(singleton);
+        } else if (!configuration.equals(singleton.getConfiguration())) {
+            throw new ConfigException("Only one authorizer configuration per JVM is supported");
+        }
+
+        if (configuration.isDelegateToKafkaACL() && configuration.isKRaft()) {
+            delegate = instantiateStandardAuthorizer();
+            delegate.configure(configs);
+        }
+
+        if (log.isDebugEnabled()) {
+            log.debug("Configured " + this + " using " + singleton);
+        }
+    }
+
+    private StandardAuthorizer instantiateStandardAuthorizer() {
+        try {
+            log.debug("Using StandardAuthorizer (KRaft based) as a delegate");
+            return new StandardAuthorizer();
+        } catch (Exception e) {
+            throw new ConfigException("KRaft mode detected ('process.roles' configured), but failed to instantiate org.apache.kafka.metadata.authorizer.StandardAuthorizer", e);
+        }
+    }
+
+    @Override
+    public Map<Endpoint, ? extends CompletionStage<Void>> start(AuthorizerServerInfo serverInfo) {
+        if (delegate != null) {
+            return delegate.start(serverInfo);
+        }
+        return singleton.start(serverInfo);
+    }
+
+    @Override
+    public void setAclMutator(AclMutator aclMutator) {
+        if (delegate != null) {
+            delegate.setAclMutator(aclMutator);
+        }
+    }
+
+    @Override
+    public AclMutator aclMutatorOrException() {
+        if (delegate != null) {
+            return delegate.aclMutatorOrException();
+        }
+        throw new IllegalStateException("KeycloakAuthorizer has not been properly configured");
+    }
+
+    @Override
+    public void completeInitialLoad() {
+        if (delegate != null) {
+            delegate.completeInitialLoad();
+        }
+    }
+
+    @Override
+    public void completeInitialLoad(Exception e) {
+        if (e != null) {
+            log.error("Failed to complete the initial load: ", e);
+        }
+        if (delegate != null) {
+            delegate.completeInitialLoad(e);
+        }
+    }
+
+    @Override
+    public void loadSnapshot(Map<Uuid, StandardAcl> acls) {
+        if (delegate != null) {
+            delegate.loadSnapshot(acls);
+        }
+    }
+
+    @Override
+    public void addAcl(Uuid id, StandardAcl acl) {
+        if (delegate != null) {
+            delegate.addAcl(id, acl);
+        } else {
+            throw new UnsupportedOperationException("ACL delegation not enabled");
+        }
+    }
+
+    @Override
+    public void removeAcl(Uuid id) {
+        if (delegate != null) {
+            delegate.removeAcl(id);
+        } else {
+            throw new UnsupportedOperationException("ACL delegation not enabled");
+        }
+    }
+
+    @Override
+    public Iterable<AclBinding> acls(AclBindingFilter filter) {
+        if (delegate != null) {
+            return delegate.acls(filter);
+        } else if (singleton != null) {
+            return singleton.acls(filter);
+        } else {
+            throw new UnsupportedOperationException("ACL delegation not enabled");
+        }
+    }
+
+    @Override
+    public List<? extends CompletionStage<AclCreateResult>> createAcls(AuthorizableRequestContext requestContext, List<AclBinding> aclBindings) {
+        if (delegate != null) {
+            return delegate.createAcls(requestContext, aclBindings);
+        } else if (singleton != null) {
+            return singleton.createAcls(requestContext, aclBindings);
+        } else {
+            throw new UnsupportedOperationException("ACL delegation not enabled");
+        }
+    }
+
+    @Override
+    public List<? extends CompletionStage<AclDeleteResult>> deleteAcls(AuthorizableRequestContext requestContext, List<AclBindingFilter> aclBindingFilters) {
+        if (delegate != null) {
+            return delegate.deleteAcls(requestContext, aclBindingFilters);
+        } else if (singleton != null) {
+            return singleton.deleteAcls(requestContext, aclBindingFilters);
+        } else {
+            throw new UnsupportedOperationException("ACL delegation not enabled");
+        }
+    }
+
+    @Override
+    public int aclCount() {
+        if (delegate != null) {
+            return delegate.aclCount();
+        } else if (singleton != null) {
+            return singleton.aclCount();
+        } else {
+            throw new UnsupportedOperationException("ACL delegation not enabled");
+        }
+    }
+
+    @Override
+    public AuthorizationResult authorizeByResourceType(AuthorizableRequestContext requestContext, AclOperation op, ResourceType resourceType) {
+        if (delegate != null) {
+            return delegate.authorizeByResourceType(requestContext, op, resourceType);
+        } else if (singleton != null) {
+            return singleton.authorizeByResourceType(requestContext, op, resourceType);
+        } else {
+            throw new UnsupportedOperationException("ACL delegation not enabled");
+        }
+    }
+
+    @Override
+    public List<AuthorizationResult> authorize(AuthorizableRequestContext requestContext, List<Action> actions) {
+        if (delegate != null) {
+            return singleton.authorize(delegate, requestContext, actions);
+        } else {
+            return singleton.authorize(requestContext, actions);
+        }
+    }
+
+    @Override
+    public void close() throws IOException {
+        if (singleton != null) {
+            singleton.close();
+        }
+        if (delegate != null) {
+            delegate.close();
+        }
+    }
+
+    @Override
+    public String toString() {
+        return KeycloakAuthorizer.class.getSimpleName() + "@" + instanceNumber;
+    }
+}
diff --git a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakAuthorizerService.java b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakAuthorizerService.java
new file mode 100644
index 00000000..8f96e8cd
--- /dev/null
+++ b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakAuthorizerService.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2017-2023, Strimzi authors.
+ * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
+ */
+package io.strimzi.kafka.oauth.server.authorizer;
+
+/**
+ * A static holder for the KeycloakRBACAuthorizer singleton instance
+ */
+@SuppressWarnings("deprecation")
+public class KeycloakAuthorizerService {
+
+    private static KeycloakRBACAuthorizer instance;
+
+    /**
+     * Get the current singleton instance
+     *
+     * @return The instance previously set by {@link #setInstance(KeycloakRBACAuthorizer)}
+     */
+    static KeycloakRBACAuthorizer getInstance() {
+        return instance;
+    }
+
+    /**
+     * Set the current KeycloakRBACAuthorizer instance as the singleton
+     *
+     * @param instance The new instance
+     */
+    static void setInstance(KeycloakRBACAuthorizer instance) {
+        KeycloakAuthorizerService.instance = instance;
+    }
+
+    /**
+     * Clear the current singleton instance
+     */
+    static void clearInstance() {
+        instance = null;
+    }
+}
diff --git a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakRBACAuthorizer.java b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakRBACAuthorizer.java
index e656ddb6..40e6de03 100644
--- a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakRBACAuthorizer.java
+++ b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/KeycloakRBACAuthorizer.java
@@ -5,11 +5,7 @@
 package io.strimzi.kafka.oauth.server.authorizer;
 
 import com.fasterxml.jackson.databind.JsonNode;
-import io.strimzi.kafka.oauth.client.ClientConfig;
 import io.strimzi.kafka.oauth.common.BearerTokenWithPayload;
-import io.strimzi.kafka.oauth.common.Config;
-import io.strimzi.kafka.oauth.common.ConfigException;
-import io.strimzi.kafka.oauth.common.ConfigUtil;
 import io.strimzi.kafka.oauth.common.HttpException;
 import io.strimzi.kafka.oauth.common.JSONUtil;
 import io.strimzi.kafka.oauth.common.SSLUtil;
@@ -21,10 +17,7 @@
 import io.strimzi.kafka.oauth.services.OAuthMetrics;
 import io.strimzi.kafka.oauth.services.ServiceException;
 import io.strimzi.kafka.oauth.services.Services;
-import io.strimzi.kafka.oauth.services.SessionFuture;
-import io.strimzi.kafka.oauth.services.Sessions;
 import io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder;
-import io.strimzi.kafka.oauth.validator.DaemonThreadFactory;
 import kafka.security.authorizer.AclAuthorizer;
 import org.apache.kafka.common.Endpoint;
 import org.apache.kafka.common.acl.AclBinding;
@@ -36,34 +29,23 @@
 import org.apache.kafka.server.authorizer.Action;
 import org.apache.kafka.server.authorizer.AuthorizableRequestContext;
 import org.apache.kafka.server.authorizer.AuthorizationResult;
+import org.apache.kafka.server.authorizer.Authorizer;
 import org.apache.kafka.server.authorizer.AuthorizerServerInfo;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.net.ssl.HostnameVerifier;
 import javax.net.ssl.SSLSocketFactory;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionStage;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Function;
-import java.util.function.Predicate;
import java.util.stream.Collectors; -import static io.strimzi.kafka.oauth.common.Config.isTrue; import static io.strimzi.kafka.oauth.common.HttpUtil.post; import static io.strimzi.kafka.oauth.common.LogUtil.mask; import static io.strimzi.kafka.oauth.common.OAuthAuthenticator.urlencode; @@ -81,6 +63,11 @@ * principal.builder.class=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder * *

+ * This authorizer only supports Kafka running in 'zookeeper' mode. It does not support 'KRaft' mode.
+ * There is a {@link KeycloakAuthorizer} class that auto-detects the environment and works in both 'KRaft' and 'zookeeper' modes;
+ * it should be used instead of this class.
+ *

+ *

* There is additional configuration that needs to be specified in order for this authorizer to work. *

*
@@ -93,7 +80,7 @@ * Required configuration: *

*
    - *
  • strimzi.authorization.token.endpoint.uri A URL of the Keycloak's token endpoint (e.g. https://keycloak:8443/auth/realms/master/protocol/openid-connect/token).
    + *
  • strimzi.authorization.token.endpoint.uri A URL of the Keycloak's token endpoint (e.g. https://keycloak:8443/auth/realms/master/protocol/openid-connect/token).
* If not present, oauth.token.endpoint.uri is used as a fallback configuration key to avoid unnecessary duplication when already present for the purpose of client authentication (see the fallback-lookup sketch below).
  • *
  • strimzi.authorization.client.id A client id of the OAuth client definition in Keycloak, that has Authorization Services enabled.
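The fallback behaviour mentioned above (a strimzi.authorization.* key falling back to its oauth.* counterpart) amounts to a two-key lookup. A minimal sketch, assuming a plain config map rather than the project's ConfigUtil:

    import java.util.Map;

    public class FallbackLookupSketch {
        static String getWithFallback(Map<String, ?> configs, String key, String fallbackKey) {
            Object v = configs.get(key);
            if (v == null) {
                v = configs.get(fallbackKey); // reuse the value already set for client authentication
            }
            return v == null ? null : String.valueOf(v);
        }

        public static void main(String[] args) {
            Map<String, ?> configs = Map.of("oauth.token.endpoint.uri",
                    "https://keycloak:8443/auth/realms/master/protocol/openid-connect/token");
            // No strimzi.authorization.* override is present, so the oauth.* value is used
            System.out.println(getWithFallback(configs,
                    "strimzi.authorization.token.endpoint.uri", "oauth.token.endpoint.uri"));
        }
    }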
    @@ -106,16 +93,25 @@ *

    *
      *
    • strimzi.authorization.kafka.cluster.name The name of this cluster, used to target permissions to specific Kafka cluster, making it possible to manage multiple clusters within the same Keycloak realm.
      - * The default value is kafka-cluster + * The default value is kafka-cluster. *
    • *
    • strimzi.authorization.delegate.to.kafka.acl Whether authorization decision should be delegated to ACLAuthorizer if DENIED by Keycloak Authorization Services policies.
      - * The default value is false + * The default value is false. *
    • *
    • strimzi.authorization.grants.refresh.period.seconds The time interval for refreshing the grants of the active sessions. The scheduled job iterates over active sessions and fetches a fresh list of grants for each.
      - * The default value is 60 + * The default value is 60. + *
    • + *
    • strimzi.authorization.grants.refresh.pool.size The number of threads used to fetch grants from token endpoint (in parallel).
      + * The default value is 5. + *
    • + *
• strimzi.authorization.reuse.grants Set this option to 'false' if you want every new session to reload and cache new grants for the user rather than using existing ones from the cache. + * The default value is true. *
    • - *
    • strimzi.authorization.grants.refresh.pool.size The number of threads to fetch grants from token endpoint (in parallel).
      - * The default value is 5 + *
• strimzi.authorization.grants.max.idle.time.seconds The maximum time in seconds that a cached grant may remain unaccessed. After that time it will be evicted from the grants cache, to prevent remnants of stale sessions from consuming memory.
      + * The default value is 300. + *
    • + *
• strimzi.authorization.grants.gc.period.seconds The time in seconds between two consecutive runs of the background job that evicts idle or expired grants from the cache.
      + * The default value is 300. *
    • *
    • strimzi.authorization.http.retries The number of times to retry fetch grants from token endpoint.
      * The retry is immediate without pausing due to the authorize() method holding up the Kafka worker thread. The default is 0 i.e. no retries. @@ -160,258 +156,143 @@ * This authorizer honors the super.users configuration. Super users are automatically granted any authorization request. *

      */ -@SuppressWarnings("deprecation") -public class KeycloakRBACAuthorizer extends AclAuthorizer { - - private static final String PRINCIPAL_BUILDER_CLASS = OAuthKafkaPrincipalBuilder.class.getName(); - private static final String DEPRECATED_PRINCIPAL_BUILDER_CLASS = JwtKafkaPrincipalBuilder.class.getName(); +@Deprecated +public class KeycloakRBACAuthorizer implements Authorizer { static final Logger log = LoggerFactory.getLogger(KeycloakRBACAuthorizer.class); static final Logger GRANT_LOG = LoggerFactory.getLogger(KeycloakRBACAuthorizer.class.getName() + ".grant"); static final Logger DENY_LOG = LoggerFactory.getLogger(KeycloakRBACAuthorizer.class.getName() + ".deny"); - private URI tokenEndpointUrl; - private String clientId; - private String clusterName; - private SSLSocketFactory socketFactory; - private HostnameVerifier hostnameVerifier; - private List superUsers = Collections.emptyList(); - private boolean delegateToKafkaACL = false; - private int connectTimeoutSeconds; - private int readTimeoutSeconds; + /** + * A counter used to generate an instance number for each instance of this class + */ + private final static AtomicInteger INSTANCE_NUMBER_COUNTER = new AtomicInteger(1); - private int httpRetries; + /** + * An instance number used in {@link #toString()} method, to easily track the number of instances of this class + */ + private final int instanceNumber = INSTANCE_NUMBER_COUNTER.getAndIncrement(); + + private final Authorizer delegator; - private boolean reuseGrants; + private SSLSocketFactory socketFactory; + private HostnameVerifier hostnameVerifier; // Turning it to false will not enforce access token expiry time (only for debugging purposes during development) private final boolean denyWhenTokenInvalid = true; - private ExecutorService workerPool; - private OAuthMetrics metrics; - private boolean enableMetrics; + private SensorKeyProducer authzSensorKeyProducer; private SensorKeyProducer grantsSensorKeyProducer; - private final Semaphores semaphores = new Semaphores<>(); + + private Authorizer delegate; + + private GrantsHandler grantsHandler; + + private Configuration configuration; /** * Create a new instance */ public KeycloakRBACAuthorizer() { - super(); + log.warn("KeycloakRBACAuthorizer has been deprecated, please use '{}' instead.", KeycloakAuthorizer.class.getName()); + this.delegator = null; + } + + /** + * Create a new instance, passing a delegating authorizer instance + * + * @param delegator The delegating authorizer instance + */ + KeycloakRBACAuthorizer(Authorizer delegator) { + this.delegator = delegator; } @Override public void configure(Map configs) { - AuthzConfig config = convertToCommonConfig(configs); - - String pbclass = (String) configs.get("principal.builder.class"); - if (!PRINCIPAL_BUILDER_CLASS.equals(pbclass) && !DEPRECATED_PRINCIPAL_BUILDER_CLASS.equals(pbclass)) { - throw new ConfigException("KeycloakRBACAuthorizer requires " + PRINCIPAL_BUILDER_CLASS + " as 'principal.builder.class'"); - } - - if (DEPRECATED_PRINCIPAL_BUILDER_CLASS.equals(pbclass)) { - log.warn("The '" + DEPRECATED_PRINCIPAL_BUILDER_CLASS + "' class has been deprecated, and may be removed in the future. 
Please use '" + PRINCIPAL_BUILDER_CLASS + "' as 'principal.builder.class' instead."); - } - - configureTokenEndpoint(config); - - clientId = ConfigUtil.getConfigWithFallbackLookup(config, AuthzConfig.STRIMZI_AUTHORIZATION_CLIENT_ID, ClientConfig.OAUTH_CLIENT_ID); - if (clientId == null) { - throw new ConfigException("OAuth client id ('strimzi.authorization.client.id') not set."); - } - - configureHttpTimeouts(config); - - socketFactory = createSSLFactory(config); - hostnameVerifier = createHostnameVerifier(config); - - clusterName = config.getValue(AuthzConfig.STRIMZI_AUTHORIZATION_KAFKA_CLUSTER_NAME); - if (clusterName == null) { - clusterName = "kafka-cluster"; - } - - delegateToKafkaACL = config.getValueAsBoolean(AuthzConfig.STRIMZI_AUTHORIZATION_DELEGATE_TO_KAFKA_ACL, false); - - configureSuperUsers(configs); - - // Number of threads that can perform token endpoint requests at the same time - final int grantsRefreshPoolSize = config.getValueAsInt(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_POOL_SIZE, 5); - if (grantsRefreshPoolSize < 1) { - throw new ConfigException("Invalid value of 'strimzi.authorization.grants.refresh.pool.size': " + grantsRefreshPoolSize + ". Has to be >= 1."); - } - - // Less or equal zero means to never check - final int grantsRefreshPeriodSeconds = config.getValueAsInt(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS, 60); - - if (grantsRefreshPeriodSeconds > 0) { - workerPool = Executors.newFixedThreadPool(grantsRefreshPoolSize); - setupRefreshGrantsJob(grantsRefreshPeriodSeconds); - } - - reuseGrants = config.getValueAsBoolean(AuthzConfig.STRIMZI_AUTHORIZATION_REUSE_GRANTS, false); + configuration = new Configuration(configs); + configuration.printLogs(); - configureHttpRetries(config); - - configureMetrics(configs, config); - - authzSensorKeyProducer = new KeycloakAuthorizationSensorKeyProducer("keycloak-authorizer", tokenEndpointUrl); - grantsSensorKeyProducer = new GrantsHttpSensorKeyProducer("keycloak-authorizer", tokenEndpointUrl); - - if (delegateToKafkaACL) { - super.configure(configs); - } + assignFields(configuration); if (log.isDebugEnabled()) { - log.debug("Configured KeycloakRBACAuthorizer:\n tokenEndpointUri: " + tokenEndpointUrl + log.debug("Configured " + this + (delegator != null ? 
" (via " + delegator + ")" : "") + ":\n tokenEndpointUri: " + configuration.getTokenEndpointUrl() + "\n sslSocketFactory: " + socketFactory + "\n hostnameVerifier: " + hostnameVerifier - + "\n clientId: " + clientId - + "\n clusterName: " + clusterName - + "\n delegateToKafkaACL: " + delegateToKafkaACL - + "\n superUsers: " + superUsers.stream().map(u -> "'" + u.getType() + ":" + u.getName() + "'").collect(Collectors.toList()) - + "\n grantsRefreshPeriodSeconds: " + grantsRefreshPeriodSeconds - + "\n grantsRefreshPoolSize: " + grantsRefreshPoolSize - + "\n httpRetries: " + httpRetries - + "\n reuseGrants: " + reuseGrants - + "\n connectTimeoutSeconds: " + connectTimeoutSeconds - + "\n readTimeoutSeconds: " + readTimeoutSeconds - + "\n enableMetrics: " + enableMetrics + + "\n clientId: " + configuration.getClientId() + + "\n clusterName: " + configuration.getClusterName() + + "\n delegateToKafkaACL: " + configuration.isDelegateToKafkaACL() + + "\n superUsers: " + configuration.getSuperUsers().stream().map(u -> "'" + u.getType() + ":" + u.getName() + "'").collect(Collectors.toList()) + + "\n grantsRefreshPeriodSeconds: " + configuration.getGrantsRefreshPeriodSeconds() + + "\n grantsRefreshPoolSize: " + configuration.getGrantsRefreshPoolSize() + + "\n grantsMaxIdleTimeSeconds: " + configuration.getGrantsMaxIdleTimeSeconds() + + "\n httpRetries: " + configuration.getHttpRetries() + + "\n reuseGrants: " + configuration.isReuseGrants() + + "\n connectTimeoutSeconds: " + configuration.getConnectTimeoutSeconds() + + "\n readTimeoutSeconds: " + configuration.getReadTimeoutSeconds() + + "\n enableMetrics: " + configuration.isEnableMetrics() + + "\n gcPeriodSeconds: " + configuration.getGcPeriodSeconds() ); } } - private void configureHttpTimeouts(AuthzConfig config) { - connectTimeoutSeconds = ConfigUtil.getTimeoutConfigWithFallbackLookup(config, AuthzConfig.STRIMZI_AUTHORIZATION_CONNECT_TIMEOUT_SECONDS, ClientConfig.OAUTH_CONNECT_TIMEOUT_SECONDS); - readTimeoutSeconds = ConfigUtil.getTimeoutConfigWithFallbackLookup(config, AuthzConfig.STRIMZI_AUTHORIZATION_READ_TIMEOUT_SECONDS, ClientConfig.OAUTH_READ_TIMEOUT_SECONDS); - } - - private void configureSuperUsers(Map configs) { - String users = (String) configs.get("super.users"); - if (users != null) { - superUsers = Arrays.stream(users.split(";")) - .map(UserSpec::of) - .collect(Collectors.toList()); - } - } + private void assignFields(Configuration configuration) { + socketFactory = createSSLFactory(configuration); + hostnameVerifier = createHostnameVerifier(configuration); - private void configureHttpRetries(AuthzConfig config) { - httpRetries = config.getValueAsInt(AuthzConfig.STRIMZI_AUTHORIZATION_HTTP_RETRIES, 0); - if (httpRetries < 0) { - throw new ConfigException("Invalid value of 'strimzi.authorization.http.retries': " + httpRetries + ". 
Has to be >= 0."); + if (configuration.isDelegateToKafkaACL()) { + setupDelegateAuthorizer(); } - } - private void configureTokenEndpoint(AuthzConfig config) { - String endpoint = ConfigUtil.getConfigWithFallbackLookup(config, AuthzConfig.STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI, - ClientConfig.OAUTH_TOKEN_ENDPOINT_URI); - if (endpoint == null) { - throw new ConfigException("OAuth2 Token Endpoint ('strimzi.authorization.token.endpoint.uri') not set."); + if (!Services.isAvailable()) { + Services.configure(configuration.getConfigMap()); } - - try { - tokenEndpointUrl = new URI(endpoint); - } catch (URISyntaxException e) { - throw new ConfigException("Specified token endpoint uri is invalid: " + endpoint); + if (configuration.isEnableMetrics()) { + metrics = Services.getInstance().getMetrics(); } - } - private void configureMetrics(Map configs, AuthzConfig config) { - if (!Services.isAvailable()) { - Services.configure(configs); - } + authzSensorKeyProducer = new KeycloakAuthorizationSensorKeyProducer("keycloak-authorizer", configuration.getTokenEndpointUrl()); + grantsSensorKeyProducer = new GrantsHttpSensorKeyProducer("keycloak-authorizer", configuration.getTokenEndpointUrl()); - String enableMetricsString = ConfigUtil.getConfigWithFallbackLookup(config, AuthzConfig.STRIMZI_AUTHORIZATION_ENABLE_METRICS, Config.OAUTH_ENABLE_METRICS); - try { - enableMetrics = enableMetricsString != null && isTrue(enableMetricsString); - } catch (Exception e) { - throw new ConfigException("Bad boolean value for key: " + AuthzConfig.STRIMZI_AUTHORIZATION_ENABLE_METRICS + ", value: " + enableMetricsString); - } + grantsHandler = new GrantsHandler(configuration.getGrantsRefreshPeriodSeconds(), + configuration.getGrantsRefreshPoolSize(), + configuration.getGrantsMaxIdleTimeSeconds(), + this::fetchAuthorizationGrantsOnce, + configuration.getHttpRetries(), + configuration.getGcPeriodSeconds()); - if (enableMetrics) { - metrics = Services.getInstance().getMetrics(); + // Call configure() on the delegate as the last thing + if (delegate != null) { + delegate.configure(configuration.getConfigMap()); } } /** - * This method extracts the key=value configuration entries relevant for KeycloakRBACAuthorizer from - * Kafka properties configuration file (server.properties) and wraps them with AuthzConfig instance. - *

      - * Any new config options have to be added here in order to become visible, otherwise they will be ignored. - * - * @param configs Kafka configs map - * @return Config object + * This method is only called if delegateToKafkaACL is enabled. + * It is responsible for instantiating the Authorizer delegate instance. */ - static AuthzConfig convertToCommonConfig(Map configs) { - Properties p = new Properties(); - - // If you add a new config property, make sure to add it to this list - // otherwise it won't be picked - String[] keys = { - AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS, - AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_POOL_SIZE, - AuthzConfig.STRIMZI_AUTHORIZATION_HTTP_RETRIES, - AuthzConfig.STRIMZI_AUTHORIZATION_REUSE_GRANTS, - AuthzConfig.STRIMZI_AUTHORIZATION_DELEGATE_TO_KAFKA_ACL, - AuthzConfig.STRIMZI_AUTHORIZATION_KAFKA_CLUSTER_NAME, - AuthzConfig.STRIMZI_AUTHORIZATION_CLIENT_ID, - Config.OAUTH_CLIENT_ID, - AuthzConfig.STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI, - ClientConfig.OAUTH_TOKEN_ENDPOINT_URI, - AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_LOCATION, - Config.OAUTH_SSL_TRUSTSTORE_LOCATION, - AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_CERTIFICATES, - Config.OAUTH_SSL_TRUSTSTORE_CERTIFICATES, - AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_PASSWORD, - Config.OAUTH_SSL_TRUSTSTORE_PASSWORD, - AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_TYPE, - Config.OAUTH_SSL_TRUSTSTORE_TYPE, - AuthzConfig.STRIMZI_AUTHORIZATION_SSL_SECURE_RANDOM_IMPLEMENTATION, - Config.OAUTH_SSL_SECURE_RANDOM_IMPLEMENTATION, - AuthzConfig.STRIMZI_AUTHORIZATION_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM, - Config.OAUTH_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM, - AuthzConfig.STRIMZI_AUTHORIZATION_CONNECT_TIMEOUT_SECONDS, - Config.OAUTH_CONNECT_TIMEOUT_SECONDS, - AuthzConfig.STRIMZI_AUTHORIZATION_READ_TIMEOUT_SECONDS, - Config.OAUTH_READ_TIMEOUT_SECONDS, - AuthzConfig.STRIMZI_AUTHORIZATION_ENABLE_METRICS, - Config.OAUTH_ENABLE_METRICS - }; - - // Copy over the keys - for (String key: keys) { - ConfigUtil.putIfNotNull(p, key, configs.get(key)); + void setupDelegateAuthorizer() { + if (delegate == null && !configuration.isKRaft()) { + log.debug("Using AclAuthorizer (ZooKeeper based) as a delegate"); + delegate = new AclAuthorizer(); } - - return new AuthzConfig(p); } - static SSLSocketFactory createSSLFactory(Config config) { - String truststore = ConfigUtil.getConfigWithFallbackLookup(config, - AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_LOCATION, Config.OAUTH_SSL_TRUSTSTORE_LOCATION); - String truststoreData = ConfigUtil.getConfigWithFallbackLookup(config, - AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_CERTIFICATES, Config.OAUTH_SSL_TRUSTSTORE_CERTIFICATES); - String password = ConfigUtil.getConfigWithFallbackLookup(config, - AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_PASSWORD, Config.OAUTH_SSL_TRUSTSTORE_PASSWORD); - String type = ConfigUtil.getConfigWithFallbackLookup(config, - AuthzConfig.STRIMZI_AUTHORIZATION_SSL_TRUSTSTORE_TYPE, Config.OAUTH_SSL_TRUSTSTORE_TYPE); - String rnd = ConfigUtil.getConfigWithFallbackLookup(config, - AuthzConfig.STRIMZI_AUTHORIZATION_SSL_SECURE_RANDOM_IMPLEMENTATION, Config.OAUTH_SSL_SECURE_RANDOM_IMPLEMENTATION); - - return SSLUtil.createSSLFactory(truststore, truststoreData, password, type, rnd); + static SSLSocketFactory createSSLFactory(Configuration config) { + return SSLUtil.createSSLFactory(config.getTruststore(), config.getTruststoreData(), config.getTruststorePassword(), config.getTruststoreType(), 
config.getPrng());
     }
 
-    static HostnameVerifier createHostnameVerifier(Config config) {
-        String hostCheck = ConfigUtil.getConfigWithFallbackLookup(config,
-                AuthzConfig.STRIMZI_AUTHORIZATION_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM, Config.OAUTH_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM);
-
-        if (hostCheck == null) {
-            hostCheck = "HTTPS";
-        }
+    /**
+     * This method returning null means that the default certificate hostname validation rules apply
+     *
+     * @param config Configuration object with parsed configuration
+     * @return HostnameVerifier that ignores hostname mismatches in the certificate, or null
+     */
+    static HostnameVerifier createHostnameVerifier(Configuration config) {
         // Following Kafka convention for skipping hostname validation (when set to "")
-        return "".equals(hostCheck) ? SSLUtil.createAnyHostHostnameVerifier() : null;
+        return "".equals(config.getCertificateHostCheckAlgorithm()) ? SSLUtil.createAnyHostHostnameVerifier() : null;
     }
 
     /**
@@ -428,6 +309,21 @@ static HostnameVerifier createHostnameVerifier(Config config) {
      * @return List of authorization results for each action in the same order as the provided actions
      */
     public List<AuthorizationResult> authorize(AuthorizableRequestContext requestContext, List<Action> actions) {
+        return authorize(delegate, requestContext, actions);
+    }
+
+    /**
+     * Authorizer method that can be called by another authorizer to delegate the authorization call to a specific delegate instance.
+     * It allows multiple authorizer instances to delegate to the same KeycloakRBACAuthorizer instance.
+     *
+     * @see KeycloakRBACAuthorizer#authorize(AuthorizableRequestContext, List)
+     *
+     * @param delegate Delegate authorizer to use as fallback
+     * @param requestContext Request context including request type, security protocol and listener name
+     * @param actions Actions being authorized including resource and operation for each action
+     * @return List of authorization results for each action in the same order as the provided actions
+     */
+    List<AuthorizationResult> authorize(Authorizer delegate, AuthorizableRequestContext requestContext, List<Action> actions) {
         JsonNode grants = null;
         long startTime = System.currentTimeMillis();
@@ -436,15 +332,15 @@ public List authorize(AuthorizableRequestCon
         try {
             KafkaPrincipal principal = requestContext.principal();
 
-            for (UserSpec u : superUsers) {
+            for (UserSpec u : configuration.getSuperUsers()) {
                 if (principal.getPrincipalType().equals(u.getType())
                         && principal.getName().equals(u.getName())) {
                     for (Action action: actions) {
-                        // It's a super user. super users are granted everything
+                        // It's a superuser.
Superusers are granted everything if (GRANT_LOG.isDebugEnabled() && action.logIfAllowed()) { GRANT_LOG.debug("Authorization GRANTED - user is a superuser: " + requestContext.principal() + - ", cluster: " + clusterName + ", operation: " + action.operation() + ", resource: " + fromResourcePattern(action.resourcePattern())); + ", cluster: " + configuration.getClusterName() + ", operation: " + action.operation() + ", resource: " + fromResourcePattern(action.resourcePattern())); } } addAuthzMetricSuccessTime(startTime); @@ -455,7 +351,8 @@ public List authorize(AuthorizableRequestContext requestCon if (!(principal instanceof OAuthKafkaPrincipal)) { // If user wasn't authenticated over OAuth, and simple ACL delegation is enabled // we delegate to simple ACL - result = delegateIfRequested(requestContext, actions, null); + + result = delegateIfRequested(delegate, requestContext, actions, null); addAuthzMetricSuccessTime(startTime); return result; @@ -463,33 +360,43 @@ public List authorize(AuthorizableRequestContext requestCon // // Check if authorization grants are available - // If not, fetch authorization grants and store them in the token + // If not, fetch authorization grants and store them in the grants cache // - OAuthKafkaPrincipal jwtPrincipal = (OAuthKafkaPrincipal) principal; - - BearerTokenWithPayload token = jwtPrincipal.getJwt(); + BearerTokenWithPayload token = ((OAuthKafkaPrincipal) principal).getJwt(); if (denyIfTokenInvalid(token)) { addAuthzMetricSuccessTime(startTime); return Collections.nCopies(actions.size(), AuthorizationResult.DENIED); } - grants = (JsonNode) token.getPayload(); - - if (grants == null) { - log.debug("No grants yet for user: {}", principal); - grants = handleFetchingGrants(token); + if (grantsHandler == null) { + throw new IllegalStateException("Authorizer has not been configured - configure() not called"); } - if (log.isDebugEnabled()) { - log.debug("Authorization grants for user {}: {}", principal, grants); + GrantsHandler.Info grantsInfo = grantsHandler.getGrantsInfoFromCache(token); + log.trace("Got grantsInfo: {}", grantsInfo); + grants = grantsInfo.getGrants(); + boolean newSession = token.getPayload() == null; + boolean mustReload = !configuration.isReuseGrants() && newSession; + if (grants == null || mustReload) { + if (grants == null) { + log.debug("No grants yet for user: {}", principal); + } else { + log.debug("Grants available but new session and reuseGrants is `false`"); + } + grants = grantsHandler.fetchGrantsForUserOrWaitForDelivery(principal.getName(), grantsInfo); + if (mustReload) { + // save empty JSON object as marker that the session has had the grants loaded. 
+ token.setPayload(JSONUtil.newObjectNode()); + } } + log.debug("Got grants for '{}': {}", principal, grants); if (grants != null) { - result = allowOrDenyBasedOnGrants(requestContext, actions, grants); + result = allowOrDenyBasedOnGrants(delegate, requestContext, actions, grants); } else { - result = delegateIfRequested(requestContext, actions, null); + result = delegateIfRequested(delegate, requestContext, actions, null); } addAuthzMetricSuccessTime(startTime); return result; @@ -498,7 +405,7 @@ public List authorize(AuthorizableRequestContext requestCon log.error("An unexpected exception has occurred: ", t); if (DENY_LOG.isDebugEnabled()) { DENY_LOG.debug("Authorization DENIED due to error - user: " + requestContext.principal() + - ", cluster: " + clusterName + ", actions: " + actions + ",\n permissions: " + grants); + ", cluster: " + configuration.getClusterName() + ", actions: " + actions + ",\n permissions: " + grants); } addAuthzMetricErrorTime(t, startTime); @@ -514,7 +421,7 @@ private String fromResourcePattern(ResourcePattern pattern) { } - private List allowOrDenyBasedOnGrants(AuthorizableRequestContext requestContext, List actions, JsonNode grants) { + private List allowOrDenyBasedOnGrants(Authorizer delegate, AuthorizableRequestContext requestContext, List actions, JsonNode grants) { List results = new ArrayList<>(actions.size()); // @@ -525,7 +432,7 @@ private List allowOrDenyBasedOnGrants(AuthorizableRequestCo for (JsonNode permission : grants) { String name = permission.get("rsname").asText(); ResourceSpec resourceSpec = ResourceSpec.of(name); - if (resourceSpec.match(clusterName, action.resourcePattern().resourceType().name(), action.resourcePattern().name())) { + if (resourceSpec.match(configuration.getClusterName(), action.resourcePattern().resourceType().name(), action.resourcePattern().name())) { JsonNode scopes = permission.get("scopes"); ScopesSpec grantedScopes = scopes == null ? null : ScopesSpec.of( @@ -534,7 +441,7 @@ private List allowOrDenyBasedOnGrants(AuthorizableRequestCo if (scopes == null || grantedScopes.isGranted(action.operation().name())) { if (GRANT_LOG.isDebugEnabled() && action.logIfAllowed()) { - GRANT_LOG.debug("Authorization GRANTED - cluster: " + clusterName + ", user: " + requestContext.principal() + + GRANT_LOG.debug("Authorization GRANTED - cluster: " + configuration.getClusterName() + ", user: " + requestContext.principal() + ", operation: " + action.operation() + ", resource: " + fromResourcePattern(action.resourcePattern()) + "\nGranted scopes for resource (" + resourceSpec + "): " + (grantedScopes == null ? 
"ALL" : grantedScopes)); } @@ -543,7 +450,7 @@ private List allowOrDenyBasedOnGrants(AuthorizableRequestCo } } } - results.addAll(delegateIfRequested(requestContext, Collections.singletonList(action), grants)); + results.addAll(delegateIfRequested(delegate, requestContext, Collections.singletonList(action), grants)); } return results; } @@ -559,88 +466,6 @@ private boolean denyIfTokenInvalid(BearerTokenWithPayload token) { return false; } - - - private JsonNode handleFetchingGrants(BearerTokenWithPayload token) { - // Fetch authorization grants - Semaphores.SemaphoreResult semaphore = semaphores.acquireSemaphore(token.value()); - - // Try to acquire semaphore for fetching grants - if (semaphore.acquired()) { - // If acquired - try { - JsonNode grants = null; - if (reuseGrants) { - // If reuseGrants is enabled, first try to get the grants from one of the existing sessions having the same access token - grants = lookupGrantsInExistingSessions(token); - } - if (grants == null) { - // If grants not available it is on us to fetch (others may be waiting) - grants = fetchAndStoreGrants(token); - } else { - log.debug("Found existing grants for the token on another session"); - } - - semaphore.future().complete(grants); - return grants; - - } catch (Throwable t) { - semaphore.future().completeExceptionally(t); - throw t; - } finally { - semaphores.releaseSemaphore(token.value()); - } - - } else { - try { - log.debug("Waiting on another thread to get grants"); - return semaphore.future().get(); - } catch (ExecutionException e) { - Throwable cause = e.getCause(); - if (cause instanceof ServiceException) { - throw (ServiceException) cause; - } else { - throw new ServiceException("ExecutionException waiting for grants result: ", e); - } - } catch (InterruptedException e) { - throw new ServiceException("InterruptedException waiting for grants result: ", e); - } - } - } - - private JsonNode fetchAndStoreGrants(BearerTokenWithPayload token) { - // If no grants found, fetch grants from server - JsonNode grants = null; - try { - log.debug("Fetching grants from Keycloak"); - grants = fetchAuthorizationGrants(token.value()); - if (grants == null) { - log.debug("Received null grants for token: {}", mask(token.value())); - grants = JSONUtil.newObjectNode(); - } - } catch (HttpException e) { - if (e.getStatus() == 403) { - grants = JSONUtil.newObjectNode(); - } else { - log.warn("Unexpected status while fetching authorization data - will retry next time: " + e.getMessage()); - } - } - if (grants != null) { - // Store authz grants in the token, so they are available for subsequent requests - log.debug("Saving non-null grants for token: {}", mask(token.value())); - token.setPayload(grants); - } - return grants; - } - - private static JsonNode lookupGrantsInExistingSessions(BearerTokenWithPayload token) { - Sessions sessions = Services.getInstance().getSessions(); - BearerTokenWithPayload existing = sessions.findFirst(t -> - t.value().equals(token.value()) && t.getPayload() != null - ); - return existing != null ? 
(JsonNode) existing.getPayload() : null; - } - static List validateScopes(List scopes) { List enumScopes = new ArrayList<>(scopes.size()); for (String name: scopes) { @@ -653,10 +478,10 @@ static List validateScopes(List scopes) { return enumScopes; } - private List delegateIfRequested(AuthorizableRequestContext context, List actions, JsonNode authz) { + private List delegateIfRequested(Authorizer delegate, AuthorizableRequestContext context, List actions, JsonNode authz) { String nonAuthMessageFragment = context.principal() instanceof OAuthKafkaPrincipal ? "" : " non-oauth"; - if (delegateToKafkaACL) { - List results = super.authorize(context, actions); + if (delegate != null) { + List results = delegate.authorize(context, actions); int i = 0; for (AuthorizationResult result: results) { @@ -708,55 +533,15 @@ private String getACLMessage(AuthorizableRequestContext context, String nonAuthM private void logDenied(Logger logger, AuthorizableRequestContext context, JsonNode authz, String nonAuthMessageFragment, Action action) { logger.debug("Authorization DENIED -" + nonAuthMessageFragment + " user: " + context.principal() + - ", cluster: " + clusterName + ", operation: " + action.operation() + + ", cluster: " + configuration.getClusterName() + ", operation: " + action.operation() + ", resource: " + fromResourcePattern(action.resourcePattern()) + ",\n permissions: " + authz); } - - /** - * Method that performs the POST request to fetch grants for the token. - * In case of a connection failure or a non-200 status response this method immediately retries the request if so configured. - *

      - * Status 401 does not trigger a retry since it is used to signal an invalid token. - * Status 403 does not trigger a retry either since it signals no permissions. - * - * @param token The raw access token - * @return Grants JSON response - */ - private JsonNode fetchAuthorizationGrants(String token) { - - int i = 0; - do { - i += 1; - - try { - if (i > 1) { - log.debug("Grants request attempt no. " + i); - } - return fetchAuthorizationGrantsOnce(token); - - } catch (Exception e) { - if (e instanceof HttpException) { - int status = ((HttpException) e).getStatus(); - if (403 == status || 401 == status) { - throw e; - } - } - - log.info("Failed to fetch grants on try no. " + i, e); - if (i > httpRetries) { - log.debug("Failed to fetch grants after " + i + " tries"); - throw e; - } - } - } while (true); - } - private JsonNode fetchAuthorizationGrantsOnce(String token) { String authorization = "Bearer " + token; - StringBuilder body = new StringBuilder("audience=").append(urlencode(clientId)) + StringBuilder body = new StringBuilder("audience=").append(urlencode(configuration.getClientId())) .append("&grant_type=").append(urlencode("urn:ietf:params:oauth:grant-type:uma-ticket")) .append("&response_mode=permissions"); @@ -764,8 +549,8 @@ private JsonNode fetchAuthorizationGrantsOnce(String token) { long startTime = System.currentTimeMillis(); try { - response = post(tokenEndpointUrl, socketFactory, hostnameVerifier, authorization, - "application/x-www-form-urlencoded", body.toString(), JsonNode.class, connectTimeoutSeconds, readTimeoutSeconds); + response = post(configuration.getTokenEndpointUrl(), socketFactory, hostnameVerifier, authorization, + "application/x-www-form-urlencoded", body.toString(), JsonNode.class, configuration.getConnectTimeoutSeconds(), configuration.getReadTimeoutSeconds()); addGrantsHttpMetricSuccessTime(startTime); } catch (HttpException e) { addGrantsHttpMetricErrorTime(e, startTime); @@ -778,193 +563,89 @@ private JsonNode fetchAuthorizationGrantsOnce(String token) { return response; } - private void setupRefreshGrantsJob(int refreshSeconds) { - // Set up periodic timer to fetch grants for active sessions every refresh seconds - ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory()); - - scheduler.scheduleAtFixedRate(this::refreshGrants, refreshSeconds, refreshSeconds, TimeUnit.SECONDS); - } - - private void refreshGrants() { - try { - log.debug("Refreshing authorization grants ..."); - // Multiple sessions can be authenticated with the same access token - // Only make one grants request for one unique access_token, - // but update all sessions for the same token - ConcurrentHashMap> tokens = new ConcurrentHashMap<>(); - - Predicate filter = token -> { - ConcurrentLinkedQueue queue = tokens.computeIfAbsent(token.value(), k -> { - ConcurrentLinkedQueue q = new ConcurrentLinkedQueue<>(); - q.add(token); - return q; - }); - - // If we are the first for the access_token return true - // if not, add the token to the queue, and return false - if (token != queue.peek()) { - queue.add(token); - return false; - } - return true; - }; - - Sessions sessions = Services.getInstance().getSessions(); - List> scheduled = scheduleGrantsRefresh(filter, sessions); - - for (SessionFuture f: scheduled) { - try { - f.get(); - } catch (ExecutionException e) { - log.warn("[IGNORED] Failed to fetch grants for token: " + e.getMessage(), e); - final Throwable cause = e.getCause(); - if (cause instanceof HttpException) { - if (401 == 
((HttpException) cause).getStatus()) { - JsonNode emptyGrants = JSONUtil.newObjectNode(); - ConcurrentLinkedQueue queue = tokens.get(f.getToken().value()); - for (BearerTokenWithPayload token: queue) { - token.setPayload(emptyGrants); - sessions.remove(token); - if (log.isDebugEnabled()) { - log.debug("Removed invalid session from sessions map (session: {}, token: {}). Will not refresh its grants any more.", - f.getToken().getSessionId(), mask(f.getToken().value())); - } - } - } - } - } catch (Throwable e) { - log.warn("[IGNORED] Failed to fetch grants for session: " + f.getToken().getSessionId() + ", token: " + mask(f.getToken().value()) + " - " + e.getMessage(), e); - } - } - - // Go over tokens, and copy the grants from the first session - // for same access token to all the others - for (ConcurrentLinkedQueue q: tokens.values()) { - BearerTokenWithPayload refreshed = null; - for (BearerTokenWithPayload t: q) { - if (refreshed == null) { - refreshed = t; - continue; - } - Object oldGrants = t.getPayload(); - Object newGrants = refreshed.getPayload(); - if (newGrants == null) { - newGrants = JSONUtil.newObjectNode(); - } - if (newGrants.equals(oldGrants)) { - // Grants refreshed, but no change - no need to copy over to all sessions - break; - } - if (log.isDebugEnabled()) { - log.debug("Grants have changed for session: {}, token: {}\nbefore: {}\nafter: {}", t.getSessionId(), mask(t.value()), oldGrants, newGrants); - } - t.setPayload(newGrants); - } + @Override + public void close() { + // We don't care about finishing the refresh tasks + if (grantsHandler != null) { + try { + grantsHandler.close(); + } catch (Exception e) { + log.error("Failed to shutdown the worker pool", e); } - - } catch (Throwable t) { - // Log, but don't rethrow the exception to prevent scheduler cancelling the scheduled job. 
- log.error(t.getMessage(), t); - } finally { - log.debug("Done refreshing grants"); } - } - private List> scheduleGrantsRefresh(Predicate filter, Sessions sessions) { - return sessions.executeTask(workerPool, filter, token -> { - if (log.isTraceEnabled()) { - log.trace("Fetch grants for session: " + token.getSessionId() + ", token: " + mask(token.value())); - } - - JsonNode newGrants; + if (delegate != null) { try { - newGrants = fetchAuthorizationGrants(token.value()); - } catch (HttpException e) { - if (403 == e.getStatus()) { - // 403 happens when no policy matches the token - thus there are no grants - newGrants = JSONUtil.newObjectNode(); - } else { - throw e; - } - } - Object oldGrants = token.getPayload(); - if (!newGrants.equals(oldGrants)) { - if (log.isDebugEnabled()) { - log.debug("Grants have changed for session: {}, token: {}\nbefore: {}\nafter: {}", token.getSessionId(), mask(token.value()), oldGrants, newGrants); - } - token.setPayload(newGrants); - } - }); - } - - @Override - public void close() { - // We don't care about finishing the refresh tasks - try { - if (workerPool != null) { - workerPool.shutdownNow(); + delegate.close(); + } catch (Exception e) { + log.error("Failed to close the delegate authorizer", e); } - } catch (Exception e) { - log.error("Failed to shutdown the worker pool", e); } - super.close(); } @Override public java.util.Map> start(AuthorizerServerInfo serverInfo) { CompletableFuture future = CompletableFuture.completedFuture(null); - if (!delegateToKafkaACL) { + if (delegate == null) { return serverInfo.endpoints().stream().collect(Collectors.toMap(Function.identity(), e -> future)); } - return super.start(serverInfo); + return delegate.start(serverInfo); } @Override public List> createAcls(AuthorizableRequestContext requestContext, List aclBindings) { - if (!delegateToKafkaACL) { + if (delegate == null) { throw new UnsupportedOperationException("Simple ACL delegation not enabled"); } - return super.createAcls(requestContext, aclBindings); + return delegate.createAcls(requestContext, aclBindings); } @Override public List> deleteAcls(AuthorizableRequestContext requestContext, List aclBindingFilters) { - if (!delegateToKafkaACL) { + if (delegate == null) { throw new UnsupportedOperationException("Simple ACL delegation not enabled"); } - return super.deleteAcls(requestContext, aclBindingFilters); + return delegate.deleteAcls(requestContext, aclBindingFilters); } @Override public Iterable acls(AclBindingFilter filter) { - if (!delegateToKafkaACL) { + if (delegate == null) { throw new UnsupportedOperationException("Simple ACL delegation not enabled"); } - return super.acls(filter); + return delegate.acls(filter); } private void addAuthzMetricSuccessTime(long startTimeMs) { - if (enableMetrics) { + if (configuration.isEnableMetrics()) { metrics.addTime(authzSensorKeyProducer.successKey(), System.currentTimeMillis() - startTimeMs); } } private void addAuthzMetricErrorTime(Throwable e, long startTimeMs) { - if (enableMetrics) { + if (configuration.isEnableMetrics()) { metrics.addTime(authzSensorKeyProducer.errorKey(e), System.currentTimeMillis() - startTimeMs); } } private void addGrantsHttpMetricSuccessTime(long startTimeMs) { - if (enableMetrics) { + if (configuration.isEnableMetrics()) { metrics.addTime(grantsSensorKeyProducer.successKey(), System.currentTimeMillis() - startTimeMs); } } private void addGrantsHttpMetricErrorTime(Throwable e, long startTimeMs) { - if (enableMetrics) { + if (configuration.isEnableMetrics()) { 
metrics.addTime(grantsSensorKeyProducer.errorKey(e), System.currentTimeMillis() - startTimeMs); } } + Configuration getConfiguration() { + return configuration; + } + + @Override + public String toString() { + return KeycloakRBACAuthorizer.class.getSimpleName() + "@" + instanceNumber; + } } diff --git a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/Semaphores.java b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/Semaphores.java index 945b4385..ba4da693 100644 --- a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/Semaphores.java +++ b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/Semaphores.java @@ -8,19 +8,70 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; +/** + * A helper class used to maintain per-user-id semaphores to implement the logic where one thread executes fetching of grants + * while other threads wait and reuse the results. + *

      + *
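+ * A typical usage pattern (a sketch):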

+ *     Semaphores.SemaphoreResult<JsonNode> semaphore = semaphores.acquireSemaphore(key);
      + *
+ *     // Check whether this thread acquired the semaphore
      + *     if (semaphore.acquired()) {
      + *         // If acquired
      + *         try {
      + *             // Obtain result
      + *             JsonNode result = getResult();
      + *
      + *             // Make results available to other threads that didn't manage to acquire the semaphore
      + *             semaphore.future().complete(result);
      + *             return result;
      + *
      + *         } catch (Throwable t) {
      + *             semaphore.future().completeExceptionally(t);
      + *             throw t;
      + *         } finally {
      + *             // Release the semaphore in finally block
+ *             semaphores.releaseSemaphore(key);
      + *         }
      + *     } else {
+ *         // Wait for the thread that acquired the semaphore to provide the result
      + *         return semaphore.future().get();
      + *     }
      + * 
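+ *
+ * The thread that ends up with {@code acquired() == true} is responsible for producing the result and completing
+ * the future, either normally or exceptionally; all concurrent callers for the same key receive the same future
+ * and simply wait on it. Releasing the semaphore removes the map entry, so the next request for the same key
+ * triggers a fresh fetch.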
+ * + * @param <T> A result type (e.g. JsonNode) + */ class Semaphores<T> { private final ConcurrentHashMap<String, Semaphore<T>> futures = new ConcurrentHashMap<>(); + /** + * Call this method to acquire the semaphore for the key + * + * @param key The key + * @return SemaphoreResult which contains information on whether the semaphore was acquired, and Future with the results + */ SemaphoreResult<T> acquireSemaphore(String key) { Semaphore<T> semaphore = futures.computeIfAbsent(key, v -> new Semaphore<>()); return new SemaphoreResult<>(semaphore); } + /** + * Call this method in a finally block to release the semaphore for the key + * + * @param key The key + */ void releaseSemaphore(String key) { futures.remove(key); } + /** + * Implementation of a semaphore. + *

      + * Only the first call to {@link #tryAcquire()} will return true + * + * @param Future result type + */ static class Semaphore { private final CompletableFuture future = new CompletableFuture<>(); @@ -33,6 +84,11 @@ private boolean tryAcquire() { } } + /** + * An object which contains information on whether the semaphore was acquired, and Future with the results. + * + * @param Future result type + */ static class SemaphoreResult { private final boolean acquired; diff --git a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/UserSpec.java b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/UserSpec.java index 443fabbe..7c853d1f 100644 --- a/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/UserSpec.java +++ b/oauth-keycloak-authorizer/src/main/java/io/strimzi/kafka/oauth/server/authorizer/UserSpec.java @@ -4,6 +4,8 @@ */ package io.strimzi.kafka.oauth.server.authorizer; +import java.util.Objects; + /** * A class used to hold parsed superusers specs */ @@ -56,6 +58,19 @@ public static UserSpec of(String principal) { return new UserSpec(principal.substring(0, pos), principal.substring(pos + 1)); } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + UserSpec userSpec = (UserSpec) o; + return type.equals(userSpec.type) && name.equals(userSpec.name); + } + + @Override + public int hashCode() { + return Objects.hash(type, name); + } + public String toString() { return super.toString() + " " + type + ":" + name; } diff --git a/oauth-server-plain/src/test/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalBuilderTest.java b/oauth-server-plain/src/test/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalBuilderTest.java index 5fc21f44..360225c2 100644 --- a/oauth-server-plain/src/test/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalBuilderTest.java +++ b/oauth-server-plain/src/test/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalBuilderTest.java @@ -5,6 +5,7 @@ package io.strimzi.kafka.oauth.server; import io.strimzi.kafka.oauth.common.BearerTokenWithPayload; +import io.strimzi.kafka.oauth.common.JSONUtil; import io.strimzi.kafka.oauth.services.Credentials; import io.strimzi.kafka.oauth.services.Principals; import io.strimzi.kafka.oauth.services.Services; @@ -34,7 +35,7 @@ public void testPreviousStoredPrincipalIsReused() { // Simulate authentication using OAuth over PLAIN BearerTokenWithPayload token = mock(BearerTokenWithPayload.class); - when(token.getPayload()).thenReturn("jwttoken"); + when(token.getPayload()).thenReturn(JSONUtil.asJson("{}")); OAuthKafkaPrincipal authenticatedPrincipal = new OAuthKafkaPrincipal(KafkaPrincipal.USER_TYPE, USERNAME, token); credentials.storeCredentials(USERNAME, authenticatedPrincipal); diff --git a/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/BearerTokenWithJsonPayload.java b/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/BearerTokenWithJsonPayload.java new file mode 100644 index 00000000..5b052bee --- /dev/null +++ b/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/BearerTokenWithJsonPayload.java @@ -0,0 +1,171 @@ +/* + * Copyright 2017-2023, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.kafka.oauth.server; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ObjectNode; +import io.strimzi.kafka.oauth.common.BearerTokenWithPayload; +import io.strimzi.kafka.oauth.common.JSONUtil; +import io.strimzi.kafka.oauth.common.TimeUtil; +import io.strimzi.kafka.oauth.common.TokenInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; + +class BearerTokenWithJsonPayload implements BearerTokenWithPayload { + + private final static Logger log = LoggerFactory.getLogger(BearerTokenWithJsonPayload.class); + + private final TokenInfo ti; + private volatile JsonNode payload; + + private int sessionId = System.identityHashCode(this); + + BearerTokenWithJsonPayload(TokenInfo ti) { + if (ti == null) { + throw new IllegalArgumentException("TokenInfo == null"); + } + this.ti = ti; + } + + @Override + public JsonNode getPayload() { + return payload; + } + + @Override + public void setPayload(JsonNode value) { + payload = value; + } + + @Override + public Set getGroups() { + return ti.groups(); + } + + @Override + public ObjectNode getClaimsJSON() { + return ti.payload(); + } + + @Override + public String value() { + return ti.token(); + } + + @Override + public Set scope() { + return ti.scope(); + } + + @Override + public long lifetimeMs() { + return ti.expiresAtMs(); + } + + @Override + public String principalName() { + return ti.principal(); + } + + @Override + public Long startTimeMs() { + return ti.issuedAtMs(); + } + + @Override + public int getSessionId() { + return sessionId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + BearerTokenWithJsonPayload that = (BearerTokenWithJsonPayload) o; + return Objects.equals(ti, that.ti); + } + + @Override + public int hashCode() { + return Objects.hash(ti); + } + + @Override + public String toString() { + return "BearerTokenWithPayloadImpl (principalName: " + ti.principal() + ", groups: " + ti.groups() + ", lifetimeMs: " + + ti.expiresAtMs() + " [" + TimeUtil.formatIsoDateTimeUTC(ti.expiresAtMs()) + " UTC], startTimeMs: " + + ti.issuedAtMs() + " [" + TimeUtil.formatIsoDateTimeUTC(ti.issuedAtMs()) + " UTC], scope: " + ti.scope() + ", payload: " + ti.payload() + ", sessionId: " + sessionId + ")"; + } + + static class Serde { + + private static final String TOKEN = "t"; + private static final String SCOPES = "sc"; + private static final String GROUPS = "g"; + private static final String PRINCIPAL = "n"; + private static final String START_TIME = "st"; + private static final String EXPIRY_TIME = "e"; + private static final String TOKEN_CLAIMS = "j"; + private static final String EXTRA_PAYLOAD = "p"; + private static final String SESSION_ID = "si"; + + + public byte[] serialize(BearerTokenWithJsonPayload token) throws IOException { + ObjectNode object = JSONUtil.newObjectNode(); + object.put(PRINCIPAL, token.principalName()); + JSONUtil.setArrayOfStringsIfNotNull(object, GROUPS, token.getGroups()); + JSONUtil.setArrayOfStringsIfNotNull(object, SCOPES, token.scope()); + object.put(TOKEN, token.value()); + object.put(START_TIME, token.startTimeMs()); + object.put(EXPIRY_TIME, token.lifetimeMs()); + object.set(TOKEN_CLAIMS, token.getClaimsJSON()); + + object.set(EXTRA_PAYLOAD, token.getPayload()); + if (token.getPayload() == null) { + logTrace("Serialising a token without an extra 
payload: " + token); + } else { + logTrace("Serialising a token with an extra payload: " + token); + } + object.put(SESSION_ID, token.sessionId); + + logTrace("Serialising a token: {}", token); + return JSONUtil.MAPPER.writeValueAsBytes(object); + } + + public BearerTokenWithJsonPayload deserialize(byte[] bytes) throws IOException { + ObjectNode object = JSONUtil.MAPPER.readValue(bytes, ObjectNode.class); + JsonNode groups = object.get(GROUPS); + JsonNode scopes = object.get(SCOPES); + JsonNode json = object.get(TOKEN_CLAIMS); + JsonNode payload = object.get(EXTRA_PAYLOAD); + int sessionId = object.get(SESSION_ID).asInt(); + BearerTokenWithJsonPayload result = new BearerTokenWithJsonPayload( + new TokenInfo(object.get(TOKEN).asText(), + scopes != null && scopes.isArray() ? new HashSet<>(JSONUtil.asListOfString(scopes, ",")) : null, + object.get(PRINCIPAL).asText(), + groups != null && groups.isArray() ? new HashSet<>(JSONUtil.asListOfString(groups, ",")) : null, + object.get(START_TIME).asLong(), + object.get(EXPIRY_TIME).asLong(), + json.isNull() ? null : json)); + + result.sessionId = sessionId; + result.setPayload(payload); + logTrace("Deserialised a token: {}", result); + + return result; + } + + private void logTrace(String message, Object... args) { + if (log.isTraceEnabled()) { + log.trace(message, args); + } + } + } +} diff --git a/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/JaasServerOauthValidatorCallbackHandler.java b/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/JaasServerOauthValidatorCallbackHandler.java index f082f9f1..978caba9 100644 --- a/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/JaasServerOauthValidatorCallbackHandler.java +++ b/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/JaasServerOauthValidatorCallbackHandler.java @@ -4,14 +4,11 @@ */ package io.strimzi.kafka.oauth.server; -import com.fasterxml.jackson.databind.node.ObjectNode; import io.strimzi.kafka.oauth.common.Config; import io.strimzi.kafka.oauth.common.ConfigException; import io.strimzi.kafka.oauth.common.ConfigUtil; -import io.strimzi.kafka.oauth.common.BearerTokenWithPayload; import io.strimzi.kafka.oauth.common.IOUtil; import io.strimzi.kafka.oauth.common.PrincipalExtractor; -import io.strimzi.kafka.oauth.common.TimeUtil; import io.strimzi.kafka.oauth.jsonpath.JsonPathFilterQuery; import io.strimzi.kafka.oauth.metrics.IntrospectValidationSensorKeyProducer; import io.strimzi.kafka.oauth.metrics.JwksValidationSensorKeyProducer; @@ -40,9 +37,7 @@ import java.net.URI; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Properties; -import java.util.Set; import java.util.function.Supplier; import static io.strimzi.kafka.oauth.common.DeprecationUtil.isAccessTokenJwt; @@ -605,7 +600,7 @@ private void handleCallback(OAuthBearerValidatorCallback callback) { debugLogToken(token); TokenInfo ti = validateToken(token); - callback.token(new BearerTokenWithPayloadImpl(ti)); + callback.token(new BearerTokenWithJsonPayload(ti)); if (log.isDebugEnabled()) { log.debug("Set validated token on callback: " + callback.token()); } @@ -732,84 +727,6 @@ public int getReadTimeout() { return readTimeout; } - static class BearerTokenWithPayloadImpl implements BearerTokenWithPayload { - - private final TokenInfo ti; - private volatile Object payload; - - BearerTokenWithPayloadImpl(TokenInfo ti) { - if (ti == null) { - throw new IllegalArgumentException("TokenInfo == null"); - } - this.ti = ti; - } - - @Override - public synchronized Object getPayload() 
{ - return payload; - } - - @Override - public synchronized void setPayload(Object value) { - payload = value; - } - - @Override - public Set getGroups() { - return ti.groups(); - } - - @Override - public ObjectNode getJSON() { - return ti.payload(); - } - - @Override - public String value() { - return ti.token(); - } - - @Override - public Set scope() { - return ti.scope(); - } - - @Override - public long lifetimeMs() { - return ti.expiresAtMs(); - } - - @Override - public String principalName() { - return ti.principal(); - } - - @Override - public Long startTimeMs() { - return ti.issuedAtMs(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - BearerTokenWithPayloadImpl that = (BearerTokenWithPayloadImpl) o; - return Objects.equals(ti, that.ti); - } - - @Override - public int hashCode() { - return Objects.hash(ti); - } - - @Override - public String toString() { - return "BearerTokenWithPayloadImpl (principalName: " + ti.principal() + ", groups: " + ti.groups() + ", lifetimeMs: " + - ti.expiresAtMs() + " [" + TimeUtil.formatIsoDateTimeUTC(ti.expiresAtMs()) + " UTC], startTimeMs: " + - ti.issuedAtMs() + " [" + TimeUtil.formatIsoDateTimeUTC(ti.issuedAtMs()) + " UTC], scope: " + ti.scope() + ")"; - } - } - protected String getConfigId() { if (validator == null) { throw new IllegalStateException("This method can only be invoked after the validator was configured"); diff --git a/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipal.java b/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipal.java index 11de4c52..bef9d848 100644 --- a/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipal.java +++ b/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipal.java @@ -17,7 +17,7 @@ /** * This class extends the KafkaPrincipal object to store additional info obtained at session authentication time, * and required later by a custom authorizer. - * + *

      * Any additional fields should not be included in equals / hashcode check. If they are, that will break re-authentication. */ @SuppressFBWarnings("EQ_DOESNT_OVERRIDE_EQUALS") @@ -26,30 +26,6 @@ public final class OAuthKafkaPrincipal extends KafkaPrincipal { private final BearerTokenWithPayload jwt; private final Set groups; - /** - * Create a new instance - * - * @param principalType Principal type (e.g. USER) - * @param name A name - */ - public OAuthKafkaPrincipal(String principalType, String name) { - this(principalType, name, (Set) null); - } - - /** - * Create a new instance - * - * @param principalType Principal type (e.g. USER) - * @param name A name - * @param groups A set of groups for the user - */ - public OAuthKafkaPrincipal(String principalType, String name, Set groups) { - super(principalType, name); - this.jwt = null; - - this.groups = groups == null ? null : Collections.unmodifiableSet(groups); - } - /** * Create a new instance, and extract groups info from the passed {@link BearerTokenWithPayload} * @@ -61,7 +37,7 @@ public OAuthKafkaPrincipal(String principalType, String name, Set groups public OAuthKafkaPrincipal(String principalType, String name, BearerTokenWithPayload jwt) { super(principalType, name); this.jwt = jwt; - Set parsedGroups = jwt.getGroups(); + Set parsedGroups = jwt != null ? jwt.getGroups() : null; this.groups = parsedGroups == null ? null : Collections.unmodifiableSet(parsedGroups); } diff --git a/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalBuilder.java b/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalBuilder.java index 6882f649..f8abb82b 100644 --- a/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalBuilder.java +++ b/oauth-server/src/main/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalBuilder.java @@ -9,6 +9,11 @@ import io.strimzi.kafka.oauth.services.Services; import org.apache.kafka.common.Configurable; import org.apache.kafka.common.config.internals.BrokerSecurityConfigs; +import org.apache.kafka.common.errors.SerializationException; +import org.apache.kafka.common.message.DefaultPrincipalData; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.MessageUtil; +import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.security.auth.AuthenticationContext; import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.apache.kafka.common.security.auth.SaslAuthenticationContext; @@ -18,9 +23,11 @@ import org.apache.kafka.common.security.plain.internals.PlainSaslServer; import javax.security.sasl.SaslServer; +import java.io.IOException; import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; +import java.nio.ByteBuffer; import java.security.PrivilegedAction; import java.util.Collections; import java.util.List; @@ -42,11 +49,12 @@ * property definition in server.properties to install it. *

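+ * In KRaft mode a principal created on one node may have to be forwarded to another, so this class also overrides
+ * principal serialization: {@code serialize()} embeds the OAuth token state as an extra tagged field of the
+ * standard {@code DefaultPrincipalData} structure, and {@code deserialize()} restores it.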
      */ -@SuppressWarnings({"deprecation", "removal"}) public class OAuthKafkaPrincipalBuilder extends DefaultKafkaPrincipalBuilder implements Configurable { private static final SetAccessibleAction SET_PRINCIPAL_MAPPER = SetAccessibleAction.newInstance(); + private static final int OAUTH_DATA_TAG = 575; + private static class SetAccessibleAction implements PrivilegedAction { private final Field field; @@ -157,4 +165,51 @@ public KafkaPrincipal build(AuthenticationContext context) { return super.build(context); } + + + @Override + public byte[] serialize(KafkaPrincipal principal) { + if (principal instanceof OAuthKafkaPrincipal) { + DefaultPrincipalData data = new DefaultPrincipalData() + .setType(principal.getPrincipalType()) + .setName(principal.getName()) + .setTokenAuthenticated(principal.tokenAuthenticated()); + BearerTokenWithPayload token = ((OAuthKafkaPrincipal) principal).getJwt(); + if (token instanceof BearerTokenWithJsonPayload) { + try { + data.unknownTaggedFields().add(new RawTaggedField(OAUTH_DATA_TAG, new BearerTokenWithJsonPayload.Serde().serialize((BearerTokenWithJsonPayload) token))); + } catch (IOException e) { + throw new SerializationException("Failed to serialize OAuthKafkaPrincipal", e); + } + } + + return MessageUtil.toVersionPrefixedBytes(DefaultPrincipalData.HIGHEST_SUPPORTED_VERSION, data); + } + return super.serialize(principal); + } + + @Override + public KafkaPrincipal deserialize(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.wrap(bytes); + short version = buffer.getShort(); + if (version < DefaultPrincipalData.LOWEST_SUPPORTED_VERSION || version > DefaultPrincipalData.HIGHEST_SUPPORTED_VERSION) { + throw new SerializationException("Invalid principal data version " + version); + } + + DefaultPrincipalData data = new DefaultPrincipalData(new ByteBufferAccessor(buffer), version); + List unknownFields = data.unknownTaggedFields(); + if (unknownFields.size() > 0) { + RawTaggedField field = unknownFields.get(0); + if (field.tag() == OAUTH_DATA_TAG) { + try { + OAuthKafkaPrincipal result = new OAuthKafkaPrincipal(data.type(), data.name(), new BearerTokenWithJsonPayload.Serde().deserialize(field.data())); + result.tokenAuthenticated(data.tokenAuthenticated()); + return result; + } catch (IOException e) { + throw new SerializationException("Failed to de-serialize OAuthKafkaPrincipal", e); + } + } + } + return new KafkaPrincipal(data.type(), data.name(), data.tokenAuthenticated()); + } } diff --git a/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/MockBearerTokenWithPayload.java b/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/MockBearerTokenWithPayload.java index 8058ab51..0d37b485 100644 --- a/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/MockBearerTokenWithPayload.java +++ b/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/MockBearerTokenWithPayload.java @@ -4,6 +4,7 @@ */ package io.strimzi.kafka.oauth.server; +import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ObjectNode; import io.strimzi.kafka.oauth.common.BearerTokenWithPayload; @@ -20,9 +21,9 @@ public class MockBearerTokenWithPayload implements BearerTokenWithPayload { private final long lifetime; private final Set scopes; private final String token; - private Object payload; + private JsonNode payload; - MockBearerTokenWithPayload(String principalName, Set groups, long createTime, long lifetime, String scope, String token, Object payload) { + MockBearerTokenWithPayload(String principalName, Set groups, long createTime, long 
lifetime, String scope, String token, JsonNode payload) { this.principalName = principalName; this.groups = groups; this.createTime = createTime; @@ -38,12 +39,12 @@ public class MockBearerTokenWithPayload implements BearerTokenWithPayload { } @Override - public Object getPayload() { + public JsonNode getPayload() { return payload; } @Override - public void setPayload(Object payload) { + public void setPayload(JsonNode payload) { this.payload = payload; } @@ -53,7 +54,7 @@ public Set getGroups() { } @Override - public ObjectNode getJSON() { + public ObjectNode getClaimsJSON() { return null; } diff --git a/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalTest.java b/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalTest.java index 3128338f..f954acb3 100644 --- a/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalTest.java +++ b/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/OAuthKafkaPrincipalTest.java @@ -5,9 +5,11 @@ package io.strimzi.kafka.oauth.server; import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import io.strimzi.kafka.oauth.common.BearerTokenWithPayload; import io.strimzi.kafka.oauth.common.JSONUtil; import io.strimzi.kafka.oauth.common.TokenInfo; +import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.junit.Assert; import org.junit.Test; @@ -22,7 +24,7 @@ public class OAuthKafkaPrincipalTest { public void testEquals() { BearerTokenWithPayload token = new MockBearerTokenWithPayload("service-account-my-client", new HashSet<>(Arrays.asList("group1", "group2")), - System.currentTimeMillis(), System.currentTimeMillis() + 60000, null, "BEARER-TOKEN-9823eh982u", "Whatever"); + System.currentTimeMillis(), System.currentTimeMillis() + 60000, null, "BEARER-TOKEN-9823eh982u", JSONUtil.asJson("{}")); OAuthKafkaPrincipal principal = new OAuthKafkaPrincipal("User", "service-account-my-client", token); @@ -31,9 +33,9 @@ public void testEquals() { OAuthKafkaPrincipal principal2 = new OAuthKafkaPrincipal("User", "service-account-my-client", token2); - OAuthKafkaPrincipal principal3 = new OAuthKafkaPrincipal("User", "service-account-my-client"); + OAuthKafkaPrincipal principal3 = new OAuthKafkaPrincipal("User", "service-account-my-client", null); - OAuthKafkaPrincipal principal4 = new OAuthKafkaPrincipal("User", "bob"); + OAuthKafkaPrincipal principal4 = new OAuthKafkaPrincipal("User", "bob", null); Assert.assertEquals("principal should be equal to principal2", principal, principal2); @@ -72,9 +74,41 @@ public void testJwtPrincipal() throws IOException { JsonNode parsed = JSONUtil.readJSON(json, JsonNode.class); TokenInfo tki = new TokenInfo(parsed, rawToken, "bob"); - BearerTokenWithPayload jwt = new JaasServerOauthValidatorCallbackHandler.BearerTokenWithPayloadImpl(tki); + BearerTokenWithPayload jwt = new BearerTokenWithJsonPayload(tki); OAuthKafkaPrincipal principalJwt = new OAuthKafkaPrincipal("User", "bob", jwt); - Assert.assertEquals("Can access parsed JWT", parsed, principalJwt.getJwt().getJSON()); + Assert.assertEquals("Can access parsed JWT", parsed, principalJwt.getJwt().getClaimsJSON()); + } + + @Test + public void testSerialisation() throws IOException { + + OAuthKafkaPrincipalBuilder builder = new OAuthKafkaPrincipalBuilder(); + + HashSet groups = new HashSet<>(); + groups.add("admins"); + groups.add("operations"); + + HashSet scopes = new HashSet<>(); + scopes.add("offline"); + scopes.add("openid"); + + long iat = 
System.currentTimeMillis() - 3600_000; + long exp = System.currentTimeMillis() + 3600_000; + String jwtBody = "{\"username\":\"alice\",\"exp\":" + exp / 1000 + ",\"groups\":[\"admins\",\"operations\"]}"; + TokenInfo tokenInfo = new TokenInfo("mock.accesstoken.", scopes, "alice", groups, iat, exp, JSONUtil.readJSON(jwtBody, ObjectNode.class)); + BearerTokenWithJsonPayload token = new BearerTokenWithJsonPayload(tokenInfo); + OAuthKafkaPrincipal principal = new OAuthKafkaPrincipal("User", "alice", token); + + + byte[] serialized = builder.serialize(principal); + KafkaPrincipal deserialized = builder.deserialize(serialized); + + Assert.assertTrue("Is OAuthKafkaPrincipal", deserialized instanceof OAuthKafkaPrincipal); + OAuthKafkaPrincipal oauthDeserialized = (OAuthKafkaPrincipal) deserialized; + Assert.assertEquals("name", principal.getName(), oauthDeserialized.getName()); + Assert.assertEquals("type", principal.getPrincipalType(), oauthDeserialized.getPrincipalType()); + Assert.assertEquals("groups", principal.getGroups(), oauthDeserialized.getGroups()); + Assert.assertEquals("tokenInfo", principal.getJwt(), oauthDeserialized.getJwt()); } } diff --git a/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/OAuthSessionAuthorizerTest.java b/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/OAuthSessionAuthorizerTest.java index 880b666e..bb6f6ffd 100644 --- a/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/OAuthSessionAuthorizerTest.java +++ b/oauth-server/src/test/java/io/strimzi/kafka/oauth/server/OAuthSessionAuthorizerTest.java @@ -104,9 +104,9 @@ private void testOAuthUserWithDelegate(Authorizer authorizer, MockAuthorizer del // Prepare condition after mock OAuth athentication with valid token TokenInfo tokenInfo = new TokenInfo("accesstoken123", null, "User:bob", new HashSet<>(Arrays.asList("group1", "group2")), - System.currentTimeMillis() - 100000, - System.currentTimeMillis() + 100000); - BearerTokenWithPayload token = new JaasServerOauthValidatorCallbackHandler.BearerTokenWithPayloadImpl(tokenInfo); + System.currentTimeMillis() - 100_000, + System.currentTimeMillis() + 100_000); + BearerTokenWithPayload token = new BearerTokenWithJsonPayload(tokenInfo); AuthorizableRequestContext ctx = requestContext(new OAuthKafkaPrincipal("User", "bob", token)); @@ -118,7 +118,7 @@ private void testOAuthUserWithDelegate(Authorizer authorizer, MockAuthorizer del true )); - // authorize() call should be delegated because the OAuthKafkaPrincipa contains a valid token + // authorize() call should be delegated because the OAuthKafkaPrincipal contains a valid token List results = authorizer.authorize(ctx, actions); MockAuthorizerLog lastEntry = delegateAuthorizer.invocationLog.getLast(); @@ -134,9 +134,9 @@ public void testOAuthUserWithExpiredTokenWithDelegate(Authorizer authorizer, Moc // Make it so that the token is expired TokenInfo tokenInfo = new TokenInfo("accesstoken234", null, "User:bob", null, - System.currentTimeMillis() - 200000, - System.currentTimeMillis() - 100000); - BearerTokenWithPayload token = new JaasServerOauthValidatorCallbackHandler.BearerTokenWithPayloadImpl(tokenInfo); + System.currentTimeMillis() - 200_000, + System.currentTimeMillis() - 100_000); + BearerTokenWithPayload token = new BearerTokenWithJsonPayload(tokenInfo); AuthorizableRequestContext ctx = requestContext(new OAuthKafkaPrincipal("User", "bob", token)); @@ -148,7 +148,7 @@ public void testOAuthUserWithExpiredTokenWithDelegate(Authorizer authorizer, Moc true )); - // authorize() call should not be 
delegated because the OAuthKafkaPrincipa contains an expired token + // authorize() call should not be delegated because the OAuthKafkaPrincipal contains an expired token List results = authorizer.authorize(ctx, actions); MockAuthorizerLog lastEntry = delegateAuthorizer.invocationLog.getLast(); @@ -217,7 +217,7 @@ private void testNonOAuthUserWithoutDelegate(Authorizer authorizer) throws Excep true )); - // authorize() call should be delegated because the OAuthKafkaPrincipa contains a valid token + // authorize() call should be delegated because the OAuthKafkaPrincipal contains a valid token List results = authorizer.authorize(ctx, actions); Assert.assertEquals("Should be allowed", AuthorizationResult.ALLOWED, results.get(0)); @@ -227,9 +227,9 @@ private void testOAuthUserWithoutDelegate(Authorizer authorizer) throws Exceptio // Prepare condition after mock OAuth athentication with valid token TokenInfo tokenInfo = new TokenInfo("accesstoken123", null, "User:bob", null, - System.currentTimeMillis() - 100000, - System.currentTimeMillis() + 100000); - BearerTokenWithPayload token = new JaasServerOauthValidatorCallbackHandler.BearerTokenWithPayloadImpl(tokenInfo); + System.currentTimeMillis() - 100_000, + System.currentTimeMillis() + 100_000); + BearerTokenWithPayload token = new BearerTokenWithJsonPayload(tokenInfo); AuthorizableRequestContext ctx = requestContext(new OAuthKafkaPrincipal("User", "bob", token)); @@ -241,7 +241,7 @@ private void testOAuthUserWithoutDelegate(Authorizer authorizer) throws Exceptio true )); - // authorize() call should not be delegated because the OAuthKafkaPrincipa contains an expired token + // authorize() call should not be delegated because the OAuthKafkaPrincipal contains an expired token List results = authorizer.authorize(ctx, actions); Assert.assertEquals("Should be allowed", AuthorizationResult.ALLOWED, results.get(0)); @@ -251,9 +251,9 @@ private void testOAuthUserWithExpiredTokenWithoutDelegate(Authorizer authorizer) // Make it so that the token is expired TokenInfo tokenInfo = new TokenInfo("accesstoken234", null, "User:bob", null, - System.currentTimeMillis() - 200000, - System.currentTimeMillis() - 100000); - BearerTokenWithPayload token = new JaasServerOauthValidatorCallbackHandler.BearerTokenWithPayloadImpl(tokenInfo); + System.currentTimeMillis() - 200_000, + System.currentTimeMillis() - 100_000); + BearerTokenWithPayload token = new BearerTokenWithJsonPayload(tokenInfo); OAuthKafkaPrincipal principal = new OAuthKafkaPrincipal("User", "bob", token); List actions = Collections.singletonList( @@ -329,7 +329,6 @@ static class MockAuthorizerLog { private List actions; private Map config; - long entryTime = System.currentTimeMillis(); MockAuthorizerLog(AuthorizableRequestContext requestContext, List actions) { this.type = MockAuthorizerType.AUTHORIZE; @@ -345,11 +344,7 @@ static class MockAuthorizerLog { enum MockAuthorizerType { CONFIGURE, - AUTHORIZE, - ADD_ACLS, - REMOVE_ACLS, - GET_ACLS, - CLOSE + AUTHORIZE } } diff --git a/pom.xml b/pom.xml index c9000e0b..43cca7ad 100644 --- a/pom.xml +++ b/pom.xml @@ -103,11 +103,11 @@ 3.3.0 4.7.0 1.6.3 - 3.3.1 + 3.4.0 2.13.4 2.13.4.2 2.6.0 - 4.13.1 + 4.13.2 1.7.36 3.12.4 9.10 @@ -156,6 +156,11 @@ kafka_2.13 ${kafka.version} + + org.apache.kafka + kafka-metadata + ${kafka.version} + io.strimzi kafka-oauth-common diff --git a/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/ContainerLogLineReader.java b/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/ContainerLogLineReader.java 
new file mode 100644 index 00000000..a732afdb --- /dev/null +++ b/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/ContainerLogLineReader.java @@ -0,0 +1,51 @@ +/* + * Copyright 2017-2023, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.testsuite.oauth.common; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +/** + * This class allows incremental reading of the log from a specified docker container. + */ +public class ContainerLogLineReader { + + private final String containerName; + private int logLineOffset = 0; + + /** + * Create a new instance of the log for the specified container name + * + * @param containerName The name of the target docker container + */ + public ContainerLogLineReader(String containerName) { + this.containerName = containerName; + } + + /** + * Fetch the whole log again and skip all the lines at the beginning that have already been read in the previous calls. + * + * @return Newly added lines in the log + * @throws IOException If an operation fails + */ + public List readNext() throws IOException { + List lines = new ArrayList<>(); + Process p = Runtime.getRuntime().exec(new String[] {"docker", "logs", containerName}); + try (BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream(), StandardCharsets.ISO_8859_1))) { + String line; + while ((line = r.readLine()) != null) { + lines.add(line); + } + } + + List result = lines.subList(logLineOffset, lines.size()); + logLineOffset = lines.size(); + return result; + } +} diff --git a/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestContainersWatcher.java b/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestContainersWatcher.java index b15aded9..98cf31e5 100644 --- a/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestContainersWatcher.java +++ b/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestContainersWatcher.java @@ -15,6 +15,9 @@ import org.testcontainers.containers.wait.strategy.WaitStrategy; import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; import java.util.ArrayList; import java.util.List; import java.util.Optional; @@ -22,7 +25,7 @@ public class TestContainersWatcher implements TestRule { - private DockerComposeContainer environment; + private final DockerComposeContainer environment; private boolean collectLogs; @@ -86,6 +89,14 @@ protected void outputLogs() { environment.getContainerByServiceName("hydra-jwt_1").ifPresent(c -> System.out.println("\n\n'hydra-jwt' log:\n\n" + c.getLogs() + "\n")); environment.getContainerByServiceName("hydra-import_1").ifPresent(c -> System.out.println("\n\n'hydra-import' log:\n\n" + c.getLogs() + "\n")); environment.getContainerByServiceName("hydra-jwt-import_1").ifPresent(c -> System.out.println("\n\n'hydra-jwt-import' log:\n\n" + c.getLogs() + "\n")); + File testLog = new File("target/test.log"); + if (testLog.isFile()) { + try { + System.out.println("\n\n'mockoauth-tests' test.log:\n\n" + new String(Files.readAllBytes(testLog.toPath()), StandardCharsets.ISO_8859_1) + "\n"); + } catch (IOException e) { + System.out.println("Failed to read file: " + testLog.getAbsolutePath()); + } + } } public void collectLogsOnExit() { diff --git 
a/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestMetrics.java b/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestMetrics.java index 2dbea012..3ddffebb 100644 --- a/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestMetrics.java +++ b/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestMetrics.java @@ -25,7 +25,7 @@ public class TestMetrics { /** * Get response from prometheus endpoint as a map of key:value pairs - * We expect the response to be a 'well formed' key=value document in the sense that each line contains a '=' sign + * We expect the response to be a 'well-formed' key=value document in the sense that each line contains a '=' sign * * @param metricsEndpointUri The endpoint used to fetch metrics * @return Metrics object @@ -83,7 +83,7 @@ void addMetric(String key, Map attrs, String value) { /** * Returns a value of a single metric matching the key and the attributes. - * + *

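+ * For example, the call below looks up a single metric value by key and attribute name/value pairs (a sketch;
+ * the metric key and the attribute values are illustrative):
+ *
+ *     String count = metrics.getValue("strimzi_oauth_http_requests_count", "kind", "jwks", "outcome", "success");
+ *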
      * Attributes are specified as: name1, value1, name2, value2, ... * Not all attributes have to be specified, but those specified have to match (equality). * @@ -116,7 +116,7 @@ public String getValue(String key, String... attrs) { /** * Get the sum of values of all the metrics matching the key and the attributes - * + *

      * Attributes are specified as: name1, value1, name2, value2, ... * Not all attributes have to be specified, but those specified have to match (equality). * diff --git a/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestUtil.java b/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestUtil.java index cbe98677..d90e06fc 100644 --- a/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestUtil.java +++ b/testsuite/common/src/main/java/io/strimzi/testsuite/oauth/common/TestUtil.java @@ -11,6 +11,8 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.TimeoutException; +import java.util.function.Supplier; import java.util.regex.Pattern; public class TestUtil { @@ -25,11 +27,11 @@ public static String unquote(String value) { * Get Kafka log by executing 'docker logs kafka', then extract only the entries * (possibly multi-line when there's a stacktrace) that contain the passed filter. * - * @param filter The string to look for (not a regex) in the log + * @param filters Strings to look for (not a regex) in the log - they all must be present in a line for the line to match * @return A list of lines from the log that match the filter (logging entries that contain the filter string) */ @SuppressFBWarnings("THROWS_METHOD_THROWS_RUNTIMEEXCEPTION") - public static List getContainerLogsForString(String containerName, String filter) { + public static List getContainerLogsForString(String containerName, String... filters) { try { boolean inmatch = false; ArrayList result = new ArrayList<>(); @@ -42,7 +44,13 @@ public static List getContainerLogsForString(String containerName, Strin // is new logging entry? if (pat.matcher(line).matches()) { // contains the err string? - inmatch = line.contains(filter); + // all filters have to match + for (String filter: filters) { + inmatch = line.contains(filter); + if (!inmatch) { + break; + } + } } if (inmatch) { result.add(line); @@ -55,4 +63,34 @@ public static List getContainerLogsForString(String containerName, Strin throw new RuntimeException("Failed to get '" + containerName + "' log", e); } } + + /** + * Helper method to wait for a condition by periodically testing the condition until it is satisfied or until timeout. 
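+ * For example, a test can poll a container log until an expected marker appears (a sketch; the container name,
+ * the marker string and the timings are illustrative):
+ *
+ *     ContainerLogLineReader logReader = new ContainerLogLineReader("kafka");
+ *     TestUtil.waitForCondition(() -> {
+ *         try {
+ *             return logReader.readNext().stream().anyMatch(line -> line.contains("started (kafka.server.KafkaServer)"));
+ *         } catch (IOException e) {
+ *             throw new RuntimeException(e);
+ *         }
+ *     }, 1000, 120);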
+ * + * @param condition The condition to test + * @param loopPauseMs A pause between two repeats in millis + * @param timeoutSeconds A timeout in seconds + * @throws TimeoutException An exception thrown if condition not satisfied within a timeout + * @throws InterruptedException An exception thrown if interrupted + */ + public static void waitForCondition(Supplier condition, int loopPauseMs, int timeoutSeconds) throws TimeoutException, InterruptedException { + long startTime = System.currentTimeMillis(); + boolean done; + do { + done = condition.get(); + if (!done) { + // Condition not met + if (System.currentTimeMillis() + loopPauseMs - startTime >= timeoutSeconds * 1000L) { + throw new TimeoutException("Condition not met in " + timeoutSeconds + " seconds"); + } + Thread.sleep(loopPauseMs); + } + } while (!done); + } + + public static void logStart(String msg) { + System.out.println(); + System.out.println("======== " + msg); + System.out.println(); + } } diff --git a/testsuite/docker/kafka/Dockerfile b/testsuite/docker/kafka/Dockerfile index 2fd56f24..066bca3b 100644 --- a/testsuite/docker/kafka/Dockerfile +++ b/testsuite/docker/kafka/Dockerfile @@ -1,4 +1,4 @@ -FROM quay.io/strimzi/kafka:0.33.2-kafka-3.4.0 +FROM quay.io/strimzi/kafka:0.34.0-kafka-3.4.0 USER root RUN rm -rf /opt/kafka/libs/bcpkix* /opt/kafka/libs/bcprov* /opt/kafka/libs/keycloak* diff --git a/testsuite/docker/kafka/config/log4j.properties b/testsuite/docker/kafka/config/log4j.properties index 085f5f61..f8a1a7d2 100644 --- a/testsuite/docker/kafka/config/log4j.properties +++ b/testsuite/docker/kafka/config/log4j.properties @@ -64,6 +64,7 @@ log4j.logger.org.apache.zookeeper=INFO # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) log4j.logger.kafka=INFO log4j.logger.org.apache.kafka=INFO +#log4j.logger.kafka.server.metadata.BrokerMetadataListener=DEBUG # Control Strimzi OAuth logging log4j.logger.io.strimzi=TRACE diff --git a/testsuite/docker/kafka/pom.xml b/testsuite/docker/kafka/pom.xml index af270235..dfe4eb86 100644 --- a/testsuite/docker/kafka/pom.xml +++ b/testsuite/docker/kafka/pom.xml @@ -31,7 +31,7 @@ - testsuite/kafka:3.3.1 + testsuite/kafka:3.4.0 diff --git a/testsuite/docker/kafka/scripts/simple_kafka_config.sh b/testsuite/docker/kafka/scripts/simple_kafka_config.sh index 2ae04f26..48ad9c60 100755 --- a/testsuite/docker/kafka/scripts/simple_kafka_config.sh +++ b/testsuite/docker/kafka/scripts/simple_kafka_config.sh @@ -52,17 +52,37 @@ done # # Generate output # -echo "#" -echo "# strimzi.properties" -echo "#" -echo broker.id=`pop_value broker.id 0` +if [[ "$1" == "--kraft" ]]; then + # + # Output kraft version of server.properties + # + echo "#" + echo "# strimzi.properties (kraft)" + echo "#" + + echo process.roles=`pop_value process.roles broker,controller` + echo node.id=`pop_value node.id 1` + echo log.dirs=`pop_value log.dirs /tmp/kraft-combined-logs` + +elif [[ "$1" == "" ]]; then + echo "#" + echo "# strimzi.properties" + echo "#" + + echo broker.id=`pop_value broker.id 0` + echo log.dirs=`pop_value log.dirs /tmp/kafka-logs` + echo group.initial.rebalance.delay.ms=`pop_value group.initial.rebalance.delay.ms 0` +else + echo "Unsupported argument: $1" + exit 1 +fi + echo num.network.threads=`pop_value num.network.threads 3` echo num.io.threads=`pop_value num.io.threads 8` echo socket.send.buffer.bytes=`pop_value socket.send.buffer.bytes 102400` echo socket.receive.buffer.bytes=`pop_value socket.receive.buffer.bytes 102400` echo 
socket.request.max.bytes=`pop_value socket.request.max.bytes 104857600` -echo log.dirs=`pop_value log.dirs /tmp/kafka-logs` echo num.partitions=`pop_value num.partitions 1` echo num.recovery.threads.per.data.dir=`pop_value num.recovery.threads.per.data.dir 1` echo offsets.topic.replication.factor=`pop_value offsets.topic.replication.factor 1` @@ -71,9 +91,6 @@ echo transaction.state.log.min.isr=`pop_value transaction.state.log.min.isr 1` echo log.retention.hours=`pop_value log.retention.hours 168` echo log.segment.bytes=`pop_value log.segment.bytes 1073741824` echo log.retention.check.interval.ms=`pop_value log.retention.check.interval.ms 300000` -echo zookeeper.connect=`pop_value zookeeper.connect localhost:2181` -echo zookeeper.connection.timeout.ms=`pop_value zookeeper.connection.timeout.ms 6000` -echo group.initial.rebalance.delay.ms=`pop_value group.initial.rebalance.delay.ms 0` # # Add what remains of KAFKA_* env vars diff --git a/testsuite/docker/kafka/scripts/start.sh b/testsuite/docker/kafka/scripts/start.sh index 3722f759..9668c5c2 100755 --- a/testsuite/docker/kafka/scripts/start.sh +++ b/testsuite/docker/kafka/scripts/start.sh @@ -12,14 +12,35 @@ wait_for_url $URI "Waiting for Keycloak to start" wait_for_url "$URI/realms/${REALM:-demo}" "Waiting for realm '${REALM}' to be available" -./simple_kafka_config.sh | tee /tmp/strimzi.properties +[ "$KAFKA_ZOOKEEPER_CONNECT" == "" ] && KAFKA_ZOOKEEPER_CONNECT=localhost:2181 +[ "$KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS" == "" ] && KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=6000 -# Add 'admin' user -KAFKA_DEBUG= /opt/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-512=[password=admin-secret]' --entity-type users --entity-name admin +./simple_kafka_config.sh $1 | tee /tmp/strimzi.properties -# Add 'alice' user -KAFKA_DEBUG= /opt/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-512=[password=alice-secret]' --entity-type users --entity-name alice +echo "Config created" + +KAFKA_DEBUG_PASSED=$KAFKA_DEBUG +unset KAFKA_DEBUG + +# add extra jars to classpath +export CLASSPATH="/opt/kafka/libs/strimzi/*:$CLASSPATH" +echo "CLASSPATH=$CLASSPATH" + +if [[ "$1" == "--kraft" ]]; then + KAFKA_CLUSTER_ID="$(/opt/kafka/bin/kafka-storage.sh random-uuid)" + /opt/kafka/bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c /tmp/strimzi.properties + echo "Initialised kafka storage for KRaft" +else + # Add 'admin' user + /opt/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-512=[password=admin-secret]' --entity-type users --entity-name admin + # Add 'alice' user + /opt/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-512=[password=alice-secret]' --entity-type users --entity-name alice + + echo "Added user secrets for SCRAM" +fi + +export KAFKA_DEBUG=$KAFKA_DEBUG_PASSED # set log dir to writable directory @@ -33,10 +54,6 @@ if [ "$KAFKA_LOG4J_OPTS" == "" ]; then fi echo "KAFKA_LOG4J_OPTS=$KAFKA_LOG4J_OPTS" -# add extra jars to classpath -export CLASSPATH="/opt/kafka/libs/strimzi/*:$CLASSPATH" -echo "CLASSPATH=$CLASSPATH" - # Prometheus JMX agent config if [ "$PROMETHEUS_AGENT_CONFIG" == "" ]; then diff --git a/testsuite/docker/kafka/scripts/start_no_wait.sh b/testsuite/docker/kafka/scripts/start_no_wait.sh index d432fc49..1c82d751 100755 --- a/testsuite/docker/kafka/scripts/start_no_wait.sh +++ b/testsuite/docker/kafka/scripts/start_no_wait.sh @@ -1,6 +1,9 @@ #!/bin/bash set -e +[ "$KAFKA_ZOOKEEPER_CONNECT" == "" ] && 
KAFKA_ZOOKEEPER_CONNECT=localhost:2181 +[ "$KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS" == "" ] && KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=6000 + ./simple_kafka_config.sh | tee /tmp/strimzi.properties # set log dir to writable directory diff --git a/testsuite/docker/kafka/scripts/start_with_hydra.sh b/testsuite/docker/kafka/scripts/start_with_hydra.sh index c315f176..e6b69ca1 100755 --- a/testsuite/docker/kafka/scripts/start_with_hydra.sh +++ b/testsuite/docker/kafka/scripts/start_with_hydra.sh @@ -11,6 +11,10 @@ URI="https://hydra-jwt:4455/clients" wait_for_url $URI "Waiting for Hydra JWT admin REST to start" wait_for_url $URI/kafka-broker "Waiting for kafka-broker client to be available" + +[ "$KAFKA_ZOOKEEPER_CONNECT" == "" ] && KAFKA_ZOOKEEPER_CONNECT=localhost:2181 +[ "$KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS" == "" ] && KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=6000 + ./simple_kafka_config.sh | tee /tmp/strimzi.properties # set log dir to writable directory diff --git a/testsuite/docker/keycloak/realms/kafka-authz-realm.json b/testsuite/docker/keycloak/realms/kafka-authz-realm.json index 2f3532e9..2e86778f 100644 --- a/testsuite/docker/keycloak/realms/kafka-authz-realm.json +++ b/testsuite/docker/keycloak/realms/kafka-authz-realm.json @@ -87,6 +87,24 @@ "account" : [ "view-profile", "manage-account" ] }, "groups" : [ "/ClusterManager-my-cluster Group" ] + }, { + "username" : "zero", + "enabled" : true, + "totp" : false, + "emailVerified" : true, + "firstName" : "Zero", + "email" : "zero@strimzi.io", + "credentials" : [ { + "type" : "password", + "value" : "zero-password" + } ], + "disableableCredentialTypes" : [ ], + "requiredActions" : [ ], + "realmRoles" : [ "offline_access", "uma_authorization" ], + "clientRoles" : { + "account" : [ "view-profile", "manage-account" ] + }, + "groups" : [] }, { "username" : "service-account-team-a-client", diff --git a/testsuite/keycloak-auth-tests/src/test/java/io/strimzi/testsuite/oauth/auth/BasicTests.java b/testsuite/keycloak-auth-tests/src/test/java/io/strimzi/testsuite/oauth/auth/BasicTests.java index fe9d937a..09ae1aa8 100644 --- a/testsuite/keycloak-auth-tests/src/test/java/io/strimzi/testsuite/oauth/auth/BasicTests.java +++ b/testsuite/keycloak-auth-tests/src/test/java/io/strimzi/testsuite/oauth/auth/BasicTests.java @@ -151,30 +151,29 @@ void clientCredentialsWithJwtECDSAValidation() throws Exception { oauthConfig.put(ClientConfig.OAUTH_CLIENT_SECRET, "kafka-producer-client-secret"); oauthConfig.put(ClientConfig.OAUTH_USERNAME_CLAIM, "preferred_username"); - Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Producer producer = new KafkaProducer<>(producerProps); - final String topic = "KeycloakAuthenticationTest-clientCredentialsWithJwtECDSAValidationTest"; - - producer.send(new ProducerRecord<>(topic, "The Message")).get(); - log.debug("Produced The Message"); + Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); + try (Producer producer = new KafkaProducer<>(producerProps)) { + producer.send(new ProducerRecord<>(topic, "The Message")).get(); + log.debug("Produced The Message"); + } Properties consumerProps = buildConsumerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Consumer consumer = new KafkaConsumer<>(consumerProps); - - TopicPartition partition = new TopicPartition(topic, 0); - consumer.assign(singletonList(partition)); + try (Consumer consumer = new KafkaConsumer<>(consumerProps)) { + TopicPartition partition = new TopicPartition(topic, 0); + 
consumer.assign(singletonList(partition)); - while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { - System.out.println("No assignment yet for consumer"); - } - consumer.seekToBeginning(singletonList(partition)); + while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { + System.out.println("No assignment yet for consumer"); + } + consumer.seekToBeginning(singletonList(partition)); - ConsumerRecords records = poll(consumer); + ConsumerRecords records = poll(consumer); - Assert.assertEquals("Got message", 1, records.count()); - Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + Assert.assertEquals("Got message", 1, records.count()); + Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + } // Check metrics @@ -196,7 +195,7 @@ void clientCredentialsWithJwtECDSAValidation() throws Exception { /** * This test uses the Kafka listener configured with both OAUTHBEARER and PLAIN, and the Keycloak realm * that uses the default RSA cryptography to sign tokens. - * + *

      * It connects to the Kafka using the OAUTHBEARER mechanism * * @throws Exception Any unhandled error @@ -221,30 +220,29 @@ void clientCredentialsWithJwtRSAValidation() throws Exception { oauthConfig.put(ClientConfig.OAUTH_CLIENT_SECRET, "team-a-client-secret"); oauthConfig.put(ClientConfig.OAUTH_USERNAME_CLAIM, "preferred_username"); - Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Producer producer = new KafkaProducer<>(producerProps); - final String topic = "KeycloakAuthenticationTest-clientCredentialsWithJwtRSAValidationTest"; - - producer.send(new ProducerRecord<>(topic, "The Message")).get(); - log.debug("Produced The Message"); + Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); + try (Producer producer = new KafkaProducer<>(producerProps)) { + producer.send(new ProducerRecord<>(topic, "The Message")).get(); + log.debug("Produced The Message"); + } Properties consumerProps = buildConsumerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Consumer consumer = new KafkaConsumer<>(consumerProps); - - TopicPartition partition = new TopicPartition(topic, 0); - consumer.assign(singletonList(partition)); + try (Consumer consumer = new KafkaConsumer<>(consumerProps)) { + TopicPartition partition = new TopicPartition(topic, 0); + consumer.assign(singletonList(partition)); - while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { - System.out.println("No assignment yet for consumer"); - } - consumer.seekToBeginning(singletonList(partition)); + while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { + System.out.println("No assignment yet for consumer"); + } + consumer.seekToBeginning(singletonList(partition)); - ConsumerRecords records = poll(consumer); + ConsumerRecords records = poll(consumer); - Assert.assertEquals("Got message", 1, records.count()); - Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + Assert.assertEquals("Got message", 1, records.count()); + Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + } // Check metrics @@ -280,30 +278,29 @@ void accessTokenWithIntrospection() throws Exception { oauthConfig.put(ClientConfig.OAUTH_ACCESS_TOKEN, info.token()); oauthConfig.put(ClientConfig.OAUTH_USERNAME_CLAIM, "preferred_username"); - Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Producer producer = new KafkaProducer<>(producerProps); - - final String topic = "KeycloakAuthenticationTest-accessTokenWithIntrospectionTest"; - producer.send(new ProducerRecord<>(topic, "The Message")).get(); - log.debug("Produced The Message"); + Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); + try (Producer producer = new KafkaProducer<>(producerProps)) { + producer.send(new ProducerRecord<>(topic, "The Message")).get(); + log.debug("Produced The Message"); + } Properties consumerProps = buildConsumerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Consumer consumer = new KafkaConsumer<>(consumerProps); - - TopicPartition partition = new TopicPartition(topic, 0); - consumer.assign(singletonList(partition)); + try (Consumer consumer = new KafkaConsumer<>(consumerProps)) { + TopicPartition partition = new TopicPartition(topic, 0); + consumer.assign(singletonList(partition)); - while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { - System.out.println("No assignment yet 
for consumer"); - } - consumer.seekToBeginning(singletonList(partition)); + while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { + System.out.println("No assignment yet for consumer"); + } + consumer.seekToBeginning(singletonList(partition)); - ConsumerRecords records = poll(consumer); + ConsumerRecords records = poll(consumer); - Assert.assertEquals("Got message", 1, records.count()); - Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + Assert.assertEquals("Got message", 1, records.count()); + Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + } // Check metrics TestMetrics metrics = getPrometheusMetrics(URI.create("http://kafka:9404/metrics")); @@ -343,30 +340,29 @@ void refreshTokenWithIntrospection() throws Exception { oauthConfig.put(ClientConfig.OAUTH_REFRESH_TOKEN, refreshToken); oauthConfig.put(ClientConfig.OAUTH_USERNAME_CLAIM, "preferred_username"); - Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Producer producer = new KafkaProducer<>(producerProps); - - final String topic = "KeycloakAuthenticationTest-refreshTokenWithIntrospectionTest"; - producer.send(new ProducerRecord<>(topic, "The Message")).get(); - log.debug("Produced The Message"); + Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); + try (Producer producer = new KafkaProducer<>(producerProps)) { + producer.send(new ProducerRecord<>(topic, "The Message")).get(); + log.debug("Produced The Message"); + } Properties consumerProps = buildConsumerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Consumer consumer = new KafkaConsumer<>(consumerProps); + try (Consumer consumer = new KafkaConsumer<>(consumerProps)) { + TopicPartition partition = new TopicPartition(topic, 0); + consumer.assign(singletonList(partition)); - TopicPartition partition = new TopicPartition(topic, 0); - consumer.assign(singletonList(partition)); + while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { + System.out.println("No assignment yet for consumer"); + } + consumer.seekToBeginning(singletonList(partition)); - while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { - System.out.println("No assignment yet for consumer"); - } - consumer.seekToBeginning(singletonList(partition)); - - ConsumerRecords records = poll(consumer); + ConsumerRecords records = poll(consumer); - Assert.assertEquals("Got message", 1, records.count()); - Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + Assert.assertEquals("Got message", 1, records.count()); + Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + } // Check metrics TestMetrics metrics = getPrometheusMetrics(URI.create("http://kafka:9404/metrics")); @@ -396,30 +392,29 @@ void passwordGrantWithJwtRSAValidation() throws Exception { oauthConfig.put(ClientConfig.OAUTH_PASSWORD_GRANT_PASSWORD, "alice-password"); oauthConfig.put(ClientConfig.OAUTH_USERNAME_CLAIM, "preferred_username"); - Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Producer producer = new KafkaProducer<>(producerProps); - final String topic = "KeycloakAuthenticationTest-passwordGrantWithJwtRSAValidationTest"; - - producer.send(new ProducerRecord<>(topic, "The Message")).get(); - log.debug("Produced The Message"); + Properties producerProps = 
buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); + try (Producer producer = new KafkaProducer<>(producerProps)) { + producer.send(new ProducerRecord<>(topic, "The Message")).get(); + log.debug("Produced The Message"); + } Properties consumerProps = buildConsumerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Consumer consumer = new KafkaConsumer<>(consumerProps); + try (Consumer consumer = new KafkaConsumer<>(consumerProps)) { + TopicPartition partition = new TopicPartition(topic, 0); + consumer.assign(singletonList(partition)); - TopicPartition partition = new TopicPartition(topic, 0); - consumer.assign(singletonList(partition)); + while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { + System.out.println("No assignment yet for consumer"); + } + consumer.seekToBeginning(singletonList(partition)); - while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { - System.out.println("No assignment yet for consumer"); - } - consumer.seekToBeginning(singletonList(partition)); + ConsumerRecords records = consumer.poll(Duration.ofSeconds(1)); - ConsumerRecords records = consumer.poll(Duration.ofSeconds(1)); - - Assert.assertEquals("Got message", 1, records.count()); - Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + Assert.assertEquals("Got message", 1, records.count()); + Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + } } @@ -453,15 +448,13 @@ void passwordGrantWithIntrospection() throws Exception { oauthConfig.put(ClientConfig.OAUTH_PASSWORD_GRANT_PASSWORD, password); oauthConfig.put(ClientConfig.OAUTH_USERNAME_CLAIM, "preferred_username"); - Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Producer producer = new KafkaProducer<>(producerProps); - - final String topic = "KeycloakAuthenticationTest-passwordGrantWithIntrospectionTest"; - producer.send(new ProducerRecord<>(topic, "The Message")).get(); - log.debug("Produced The Message"); - + Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); + try (Producer producer = new KafkaProducer<>(producerProps)) { + producer.send(new ProducerRecord<>(topic, "The Message")).get(); + log.debug("Produced The Message"); + } // Authenticate using the username and password and a confidential client - different clientId + secret for consumer @@ -480,20 +473,19 @@ void passwordGrantWithIntrospection() throws Exception { oauthConfig.put(ClientConfig.OAUTH_CLIENT_SECRET, confidentialClientSecret); Properties consumerProps = buildConsumerConfigOAuthBearer(kafkaBootstrap, oauthConfig); - Consumer consumer = new KafkaConsumer<>(consumerProps); - - TopicPartition partition = new TopicPartition(topic, 0); - consumer.assign(singletonList(partition)); + try (Consumer consumer = new KafkaConsumer<>(consumerProps)) { + TopicPartition partition = new TopicPartition(topic, 0); + consumer.assign(singletonList(partition)); - while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { - System.out.println("No assignment yet for consumer"); - } - consumer.seekToBeginning(singletonList(partition)); + while (consumer.partitionsFor(topic, Duration.ofSeconds(1)).size() == 0) { + System.out.println("No assignment yet for consumer"); + } + consumer.seekToBeginning(singletonList(partition)); - ConsumerRecords records = consumer.poll(Duration.ofSeconds(1)); + ConsumerRecords records = consumer.poll(Duration.ofSeconds(1)); - 
Assert.assertEquals("Got message", 1, records.count()); - Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + Assert.assertEquals("Got message", 1, records.count()); + Assert.assertEquals("Is message text: 'The Message'", "The Message", records.iterator().next().value()); + } } - } diff --git a/testsuite/keycloak-authz-kraft-tests/docker-compose.yml b/testsuite/keycloak-authz-kraft-tests/docker-compose.yml new file mode 100644 index 00000000..31991b79 --- /dev/null +++ b/testsuite/keycloak-authz-kraft-tests/docker-compose.yml @@ -0,0 +1,153 @@ +version: '3' + +services: + keycloak: + image: quay.io/keycloak/keycloak:19.0.3-legacy + ports: + - "8080:8080" + - "8443:8443" + volumes: + - ${PWD}/../docker/keycloak/realms:/opt/jboss/keycloak/realms + + entrypoint: "" + + command: + - /bin/bash + - -c + - cd /opt/jboss && /opt/jboss/tools/docker-entrypoint.sh -Dkeycloak.profile.feature.upload_scripts=enabled -b 0.0.0.0 + + environment: + - KEYCLOAK_USER=admin + - KEYCLOAK_PASSWORD=admin + - KEYCLOAK_HTTPS_PORT=8443 + - PROXY_ADDRESS_FORWARDING=true + - KEYCLOAK_IMPORT=/opt/jboss/keycloak/realms/kafka-authz-realm.json + + kafka: + image: ${KAFKA_DOCKER_IMAGE} + ports: + - "9091:9091" + - "9092:9092" + - "9093:9093" + - "9094:9094" + - "9095:9095" + - "9096:9096" + - "9100:9100" + + # Prometheus JMX Exporter + - "9404:9404" + + # javaagent debug port + - "5006:5006" + volumes: + - ${PWD}/../docker/target/kafka/libs:/opt/kafka/libs/strimzi + - ${PWD}/../docker/kafka/config:/opt/kafka/config/strimzi + - ${PWD}/../docker/kafka/scripts:/opt/kafka/strimzi + command: + - /bin/bash + - -c + - cd /opt/kafka/strimzi && ./start.sh --kraft + environment: + + #- KAFKA_DEBUG=y + #- DEBUG_SUSPEND_FLAG=y + #- JAVA_DEBUG_PORT=*:5006 + + # KRaft properties + - KAFKA_PROCESS_ROLES=broker,controller + - KAFKA_NODE_ID=1 + - KAFKA_CONTROLLER_QUORUM_VOTERS=1@kafka:9091 + - KAFKA_CONTROLLER_LISTENER_NAMES=CONTROLLER + - KAFKA_SASL_MECHANISM_CONTROLLER_PROTOCOL=PLAIN + + - KAFKA_LISTENERS=CONTROLLER://kafka:9091,JWT://kafka:9092,INTROSPECT://kafka:9093,JWTPLAIN://kafka:9094,INTROSPECTPLAIN://kafka:9095,JWTREFRESH://kafka:9096,PLAIN://kafka:9100 + - KAFKA_ADVERTISED_LISTENERS=JWT://kafka:9092,INTROSPECT://kafka:9093,JWTPLAIN://kafka:9094,INTROSPECTPLAIN://kafka:9095,JWTREFRESH://kafka:9096,PLAIN://kafka:9100 + - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:SASL_PLAINTEXT,JWT:SASL_PLAINTEXT,INTROSPECT:SASL_PLAINTEXT,JWTPLAIN:SASL_PLAINTEXT,INTROSPECTPLAIN:SASL_PLAINTEXT,JWTREFRESH:SASL_PLAINTEXT,PLAIN:SASL_PLAINTEXT + + - KAFKA_INTER_BROKER_LISTENER_NAME=JWT + - KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL=OAUTHBEARER + + - KAFKA_PRINCIPAL_BUILDER_CLASS=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder + + # Common settings for all the listeners + # username extraction from JWT token claim + - OAUTH_USERNAME_CLAIM=preferred_username + - OAUTH_CONNECT_TIMEOUT_SECONDS=20 + + - OAUTH_ENABLE_METRICS=true + + # Configuration of individual listeners + - KAFKA_LISTENER_NAME_CONTROLLER_SASL_ENABLED_MECHANISMS=PLAIN + - KAFKA_LISTENER_NAME_CONTROLLER_PLAIN_SASL_JAAS_CONFIG=org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin-password\" user_admin=\"admin-password\" user_bobby=\"bobby-secret\" ; + + - KAFKA_LISTENER_NAME_JWT_SASL_ENABLED_MECHANISMS=OAUTHBEARER + - KAFKA_LISTENER_NAME_JWT_OAUTHBEARER_SASL_JAAS_CONFIG=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required 
oauth.jwks.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/certs\" oauth.valid.issuer.uri=\"http://keycloak:8080/auth/realms/kafka-authz\" oauth.token.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token\" oauth.client.id=\"kafka\" oauth.client.secret=\"kafka-secret\" oauth.groups.claim=\"$$.realm_access.roles\" ; + - KAFKA_LISTENER_NAME_JWT_OAUTHBEARER_SASL_SERVER_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler + - KAFKA_LISTENER_NAME_JWT_OAUTHBEARER_SASL_LOGIN_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler + + - KAFKA_LISTENER_NAME_INTROSPECT_SASL_ENABLED_MECHANISMS=OAUTHBEARER + - KAFKA_LISTENER_NAME_INTROSPECT_OAUTHBEARER_SASL_JAAS_CONFIG=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required oauth.introspection.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token/introspect\" oauth.valid.issuer.uri=\"http://keycloak:8080/auth/realms/kafka-authz\" oauth.client.id=\"kafka\" oauth.client.secret=\"kafka-secret\" unsecuredLoginStringClaim_sub=\"admin\" ; + - KAFKA_LISTENER_NAME_INTROSPECT_OAUTHBEARER_SASL_SERVER_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler + + - KAFKA_LISTENER_NAME_JWTPLAIN_SASL_ENABLED_MECHANISMS=PLAIN + - KAFKA_LISTENER_NAME_JWTPLAIN_PLAIN_SASL_JAAS_CONFIG=org.apache.kafka.common.security.plain.PlainLoginModule required oauth.jwks.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/certs\" oauth.valid.issuer.uri=\"http://keycloak:8080/auth/realms/kafka-authz\" oauth.token.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token\" oauth.client.id=\"kafka\" oauth.client.secret=\"kafka-secret\" unsecuredLoginStringClaim_sub=\"admin\" ; + - KAFKA_LISTENER_NAME_JWTPLAIN_PLAIN_SASL_SERVER_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.server.plain.JaasServerOauthOverPlainValidatorCallbackHandler + + - KAFKA_LISTENER_NAME_INTROSPECTPLAIN_SASL_ENABLED_MECHANISMS=PLAIN + - KAFKA_LISTENER_NAME_INTROSPECTPLAIN_PLAIN_SASL_JAAS_CONFIG=org.apache.kafka.common.security.plain.PlainLoginModule required oauth.introspection.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token/introspect\" oauth.valid.issuer.uri=\"http://keycloak:8080/auth/realms/kafka-authz\" oauth.token.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token\" oauth.client.id=\"kafka\" oauth.client.secret=\"kafka-secret\" unsecuredLoginStringClaim_sub=\"admin\" ; + - KAFKA_LISTENER_NAME_INTROSPECTPLAIN_PLAIN_SASL_SERVER_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.server.plain.JaasServerOauthOverPlainValidatorCallbackHandler + + - KAFKA_LISTENER_NAME_JWTREFRESH_SASL_ENABLED_MECHANISMS=OAUTHBEARER + - KAFKA_LISTENER_NAME_JWTREFRESH_OAUTHBEARER_SASL_JAAS_CONFIG=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required oauth.jwks.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/certs\" oauth.valid.issuer.uri=\"http://keycloak:8080/auth/realms/kafka-authz\" oauth.token.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token\" oauth.client.id=\"kafka\" oauth.client.secret=\"kafka-secret\" oauth.jwks.refresh.min.pause.seconds=\"2\" unsecuredLoginStringClaim_sub=\"admin\" ; + - 
KAFKA_LISTENER_NAME_JWTREFRESH_OAUTHBEARER_SASL_SERVER_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler + # Enable re-authentication + - KAFKA_LISTENER_NAME_JWTREFRESH_OAUTHBEARER_CONNECTIONS_MAX_REAUTH_MS=3600000 + + - KAFKA_LISTENER_NAME_PLAIN_SASL_ENABLED_MECHANISMS=PLAIN + - KAFKA_LISTENER_NAME_PLAIN_PLAIN_SASL_JAAS_CONFIG=org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin-password\" user_admin=\"admin-password\" user_bobby=\"bobby-secret\" ; + + + # Authorizer configuration + - KAFKA_AUTHORIZER_CLASS_NAME=io.strimzi.kafka.oauth.server.authorizer.KeycloakAuthorizer + + - KAFKA_STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI=http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token + - KAFKA_STRIMZI_AUTHORIZATION_CLIENT_ID=kafka + - KAFKA_STRIMZI_AUTHORIZATION_CLIENT_SECRET=kafka-secret + - KAFKA_STRIMZI_AUTHORIZATION_KAFKA_CLUSTER_NAME=my-cluster + - KAFKA_STRIMZI_AUTHORIZATION_DELEGATE_TO_KAFKA_ACL=true + - KAFKA_STRIMZI_AUTHORIZATION_READ_TIMEOUT_SECONDS=45 + + # Parameters controlling the refreshing of grants + - KAFKA_STRIMZI_AUTHORIZATION_GRANTS_REFRESH_POOL_SIZE=4 + + # Any change to permissions will be reflected within 10 seconds + # Has to be set to 10 seconds for keycloak-authz*-tests/**/RefreshTest + - KAFKA_STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS=10 + + # If a grants fetch fails, immediately perform one retry + - KAFKA_STRIMZI_AUTHORIZATION_HTTP_RETRIES=1 + + # Use grants fetched for another session if available + - KAFKA_STRIMZI_AUTHORIZATION_REUSE_GRANTS=true + + - KAFKA_STRIMZI_AUTHORIZATION_ENABLE_METRICS=true + + - KAFKA_SUPER_USERS=User:admin;User:service-account-kafka + + # Other configuration + - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 + + # For start.sh script to know where the keycloak is listening + - KEYCLOAK_HOST=${KEYCLOAK_HOST:-keycloak} + - REALM=${REALM:-kafka-authz} + + kafka-acls: + image: ${KAFKA_DOCKER_IMAGE} + links: + - kafka + volumes: + - ${PWD}/../docker/kafka-acls/scripts:/opt/kafka/strimzi + command: + - /bin/bash + - -c + - cd /opt/kafka/strimzi && ./add-acls.sh diff --git a/testsuite/keycloak-authz-kraft-tests/pom.xml b/testsuite/keycloak-authz-kraft-tests/pom.xml new file mode 100644 index 00000000..c488d253 --- /dev/null +++ b/testsuite/keycloak-authz-kraft-tests/pom.xml @@ -0,0 +1,63 @@ + + + + 4.0.0 + + + io.strimzi.oauth.testsuite + kafka-oauth-testsuite + 1.0.0-SNAPSHOT + + + keycloak-authz-kraft-tests + + + + Apache License, Version 2.0 + https://www.apache.org/licenses/LICENSE-2.0.txt + + + + + ../.. + + + + + org.testcontainers + testcontainers + test + + + junit + junit + ${version.junit} + + + io.strimzi.oauth.testsuite + common + + + io.strimzi.oauth.testsuite + keycloak-authz-tests + + + + io.strimzi + kafka-oauth-common + + + io.strimzi + kafka-oauth-client + + + org.apache.kafka + kafka-clients + + + org.slf4j + slf4j-simple + + + \ No newline at end of file diff --git a/testsuite/keycloak-authz-kraft-tests/src/test/java/io/strimzi/testsuite/oauth/authz/kraft/KeycloakKRaftAuthorizationTests.java b/testsuite/keycloak-authz-kraft-tests/src/test/java/io/strimzi/testsuite/oauth/authz/kraft/KeycloakKRaftAuthorizationTests.java new file mode 100644 index 00000000..3135ca71 --- /dev/null +++ b/testsuite/keycloak-authz-kraft-tests/src/test/java/io/strimzi/testsuite/oauth/authz/kraft/KeycloakKRaftAuthorizationTests.java @@ -0,0 +1,120 @@ +/* + * Copyright 2017-2020, Strimzi authors. 
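All of the KAFKA_* variables above end up as broker configuration: the testsuite's start script turns each environment variable into a server.properties entry. Roughly, the KAFKA_ prefix is stripped, the remainder is lower-cased, and underscores become dots. The sketch below illustrates that naming rule only; the real script also deals with quoting and value escaping, so treat it as an approximation:

    import java.util.Locale;

    public class EnvToPropertySketch {

        // Approximate naming rule for the KAFKA_* variables above:
        // KAFKA_STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI
        //   -> strimzi.authorization.token.endpoint.uri
        static String toBrokerProperty(String envName) {
            if (!envName.startsWith("KAFKA_")) {
                throw new IllegalArgumentException("Not a KAFKA_* variable: " + envName);
            }
            return envName.substring("KAFKA_".length())
                    .toLowerCase(Locale.ROOT)
                    .replace('_', '.');
        }

        public static void main(String[] args) {
            // prints: authorizer.class.name
            System.out.println(toBrokerProperty("KAFKA_AUTHORIZER_CLASS_NAME"));
        }
    }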
+ * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.testsuite.oauth.authz.kraft; + +import io.strimzi.testsuite.oauth.authz.BasicTest; +import io.strimzi.testsuite.oauth.authz.ConfigurationTest; +import io.strimzi.testsuite.oauth.authz.FloodTest; +import io.strimzi.testsuite.oauth.authz.MetricsTest; +import io.strimzi.testsuite.oauth.authz.MultiSaslTest; +import io.strimzi.testsuite.oauth.authz.OAuthOverPlainTest; +import io.strimzi.testsuite.oauth.authz.RefreshTest; +import io.strimzi.testsuite.oauth.authz.SingletonTest; +import io.strimzi.testsuite.oauth.common.TestContainersLogCollector; +import io.strimzi.testsuite.oauth.common.TestContainersWatcher; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestRule; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.wait.strategy.Wait; + +import java.io.File; +import java.time.Duration; + +import static io.strimzi.testsuite.oauth.authz.Common.waitForACLs; +import static io.strimzi.testsuite.oauth.common.TestUtil.logStart; + +/** + * Tests for OAuth authentication using Keycloak + Keycloak Authorization Services based authorization when KeycloakAuthorizer is configured on the Kafka broker running in KRaft mode + *

      + * This test assumes there are multiple listeners configured with OAUTHBEARER or PLAIN support, but each configured differently + * - with different options, or against a different realm. + *

      + * There is KeycloakAuthorizer configured on the Kafka broker. + */ +public class KeycloakKRaftAuthorizationTests { + + @ClassRule + public static TestContainersWatcher environment = + new TestContainersWatcher(new File("docker-compose.yml")) + .withServices("keycloak", "kafka", "kafka-acls") + + // ensure kafka has started + .waitingFor("kafka", Wait.forLogMessage(".*started \\(kafka.server.KafkaRaftServer\\).*", 1) + .withStartupTimeout(Duration.ofSeconds(120))); + + // ensure ACLs for user 'alice' have been added + // Moved into test code: waitForACLs() + + @Rule + public TestRule logCollector = new TestContainersLogCollector(environment); + + private static final Logger log = LoggerFactory.getLogger(KeycloakKRaftAuthorizationTests.class); + + private static final String JWT_LISTENER = "kafka:9092"; + private static final String INTROSPECT_LISTENER = "kafka:9093"; + private static final String JWTPLAIN_LISTENER = "kafka:9094"; + private static final String INTROSPECTPLAIN_LISTENER = "kafka:9095"; + private static final String JWTREFRESH_LISTENER = "kafka:9096"; + + + @Test + public void doTest() throws Exception { + try { + + String kafkaContainer = environment.getContainerByServiceName("kafka_1").get().getContainerInfo().getName().substring(1); + + logStart("KeycloakKRaftAuthorizationTest :: ConfigurationTest"); + new ConfigurationTest(kafkaContainer).doTest(); + + logStart("KeycloakKRaftAuthorizationTest :: MetricsTest (part 1)"); + MetricsTest.doTest(); + + // Before running the rest of the tests, ensure ACLs have been added to Kafka cluster + waitForACLs(); + + logStart("KeycloakKRaftAuthorizationTest :: MultiSaslTests"); + new MultiSaslTest(kafkaContainer).doTest(); + + logStart("KeycloakKRaftAuthorizationTest :: JwtValidationAuthzTest"); + new BasicTest(kafkaContainer, JWT_LISTENER, false).doTest(); + + logStart("KeycloakKRaftAuthorizationTest :: IntrospectionValidationAuthzTest"); + new BasicTest(kafkaContainer, INTROSPECT_LISTENER, false).doTest(); + + logStart("KeycloakKRaftAuthorizationTest :: MetricsTest (part 2)"); + MetricsTest.doTestValidationAndAuthorization(); + + logStart("KeycloakKRaftAuthorizationTest :: OAuthOverPlain + JwtValidationAuthzTest"); + new OAuthOverPlainTest(kafkaContainer, JWTPLAIN_LISTENER, true).doTest(); + + logStart("KeycloakKRaftAuthorizationTest :: OAuthOverPlain + IntrospectionValidationAuthzTest"); + new OAuthOverPlainTest(kafkaContainer, INTROSPECTPLAIN_LISTENER, true).doTest(); + + logStart("KeycloakKRaftAuthorizationTest :: OAuthOverPLain + FloodTest"); + new FloodTest(JWTPLAIN_LISTENER, true).doTest(); + + logStart("KeycloakKRaftAuthorizationTest :: JWT FloodTest"); + new FloodTest(JWT_LISTENER, false).doTest(); + + logStart("KeycloakKRaftAuthorizationTest :: Introspection FloodTest"); + new FloodTest(INTROSPECT_LISTENER, false).doTest(); + + // This test has to be the last one - it changes the team-a-client, and team-b-client permissions in Keycloak + logStart("KeycloakKRaftAuthorizationTest :: JwtValidationAuthzTest + RefreshGrants"); + new RefreshTest(kafkaContainer, JWTREFRESH_LISTENER, false).doTest(); + + logStart("KeycloakKRaftAuthorizationTest :: SingletonTest"); + new SingletonTest(kafkaContainer).doSingletonTest(2); + + } catch (Throwable e) { + log.error("Keycloak Raft Authorization Test failed: ", e); + throw e; + } + } +} diff --git a/testsuite/keycloak-authz-kraft-tests/src/test/resources/simplelogger.properties b/testsuite/keycloak-authz-kraft-tests/src/test/resources/simplelogger.properties new file mode 100644 index 
00000000..53f71fbd --- /dev/null +++ b/testsuite/keycloak-authz-kraft-tests/src/test/resources/simplelogger.properties @@ -0,0 +1,2 @@ +org.slf4j.simpleLogger.log.org.apache.kafka=OFF +org.slf4j.simpleLogger.log.io.strimzi=INFO \ No newline at end of file diff --git a/testsuite/keycloak-authz-tests/pom.xml b/testsuite/keycloak-authz-tests/pom.xml index 9f0cda92..bd0074a2 100644 --- a/testsuite/keycloak-authz-tests/pom.xml +++ b/testsuite/keycloak-authz-tests/pom.xml @@ -55,5 +55,9 @@ org.slf4j slf4j-simple + + com.github.spotbugs + spotbugs-annotations + \ No newline at end of file diff --git a/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/BasicTest.java b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/BasicTest.java new file mode 100644 index 00000000..b5b82345 --- /dev/null +++ b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/BasicTest.java @@ -0,0 +1,291 @@ +/* + * Copyright 2017-2020, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.testsuite.oauth.authz; + +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import io.strimzi.testsuite.oauth.common.TestUtil; +import org.apache.kafka.clients.admin.AdminClient; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.junit.Assert; + +import java.util.List; +import java.util.Properties; + +import static java.util.Collections.singletonList; + + +@SuppressFBWarnings("THROWS_METHOD_THROWS_CLAUSE_BASIC_EXCEPTION") +public class BasicTest extends Common { + + private final String kafkaContainer; + + public BasicTest(String kafkaContainer, String kafkaBootstrap, boolean oauthOverPlain) { + super(kafkaBootstrap, oauthOverPlain); + this.kafkaContainer = kafkaContainer; + } + + public void doTest() throws Exception { + + authenticateAllActors(); + + testTeamAClientPart1(); + + testTeamBClientPart1(); + + createTopicAsClusterManager(); + + testTeamAClientPart2(); + + testTeamBClientPart2(); + + testClusterManager(); + + testUserWithNoPermissions(); + + cleanup(); + } + + void createTopicAsClusterManager() throws Exception { + + Properties bobAdminProps = buildAdminConfigForAccount(Common.BOB); + try (AdminClient admin = AdminClient.create(bobAdminProps)) { + // + // Create x_* topic + // + admin.createTopics(singletonList(new NewTopic(Common.TOPIC_X, 1, (short) 1))).all().get(); + } + } + + void testClusterManager() throws Exception { + + Properties bobAdminProps = buildProducerConfigForAccount(Common.BOB); + Producer producer = new KafkaProducer<>(bobAdminProps); + + Properties consumerProps = buildConsumerConfigForAccount(Common.BOB); + Consumer consumer = new KafkaConsumer<>(consumerProps); + + // + // bob should succeed producing to x_* topic + // + Common.produce(producer, Common.TOPIC_X); + + // + // bob should succeed producing to a_* topic + // + Common.produce(producer, Common.TOPIC_A); + + // + // bob should succeed producing to b_* topic + // + Common.produce(producer, Common.TOPIC_B); + + // + // bob should succeed producing to non-existing topic + // + Common.produce(producer, "non-existing-topic"); + + // + // bob should succeed consuming from x_* topic + // + Common.consume(consumer, Common.TOPIC_X); + + // + // bob 
should succeed consuming from a_* topic + // + Common.consume(consumer, Common.TOPIC_A); + + // + // bob should succeed consuming from b_* topic + // + Common.consume(consumer, Common.TOPIC_B); + + // + // bob should succeed consuming from "non-existing-topic" - which now exists + // + Common.consume(consumer, "non-existing-topic"); + } + + void testTeamAClientPart1() throws Exception { + + Producer teamAProducer = getProducer(Common.TEAM_A_CLIENT); + + // + // team-a-client should fail to produce to b_* topic + // + Common.produceFail(teamAProducer, Common.TOPIC_B); + + // Re-init producer because message to topicB is stuck in the queue, and any subsequent message to another queue + // won't be handled until first message makes it through. + teamAProducer = newProducer(Common.TEAM_A_CLIENT); + + // + // team-a-client should succeed producing to a_* topic + // + Common.produce(teamAProducer, Common.TOPIC_A); + + // + // team-a-client should also fail producing to non-existing x_* topic (fails to create it) + // + Common.produceFail(teamAProducer, Common.TOPIC_X); + + Consumer teamAConsumer = newConsumer(Common.TEAM_A_CLIENT, Common.TOPIC_B); + + // + // team-a-client should fail consuming from b_* topic + // + Common.consumeFail(teamAConsumer, Common.TOPIC_B); + + + // Close and re-init consumer + teamAConsumer = newConsumer(Common.TEAM_A_CLIENT, Common.TOPIC_A); + + // + // team-a-client should succeed consuming from a_* topic + // + Common.consume(teamAConsumer, Common.TOPIC_A); + + // + // team-a-client should fail consuming from x_* topic - it doesn't exist + // + Common.consumeFail(teamAConsumer, Common.TOPIC_X); + } + + void testTeamBClientPart1() throws Exception { + + Producer teamBProducer = getProducer(Common.TEAM_B_CLIENT); + + // + // team-b-client should fail to produce to a_* topic + // + Common.produceFail(teamBProducer, Common.TOPIC_A); + + // Re-init producer because message to topicA is stuck in the queue, and any subsequent message to another queue + // won't be handled until first message makes it through. 
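For context on what these produce/consume assertions rely on: the diff for the produceFail helper (in Common.java further down) changes its signature to throw only InterruptedException and elides the catch block, which implies it unwraps the ExecutionException from Future.get() itself. A plausible shape of that helper, reconstructed under that assumption:

    import org.apache.kafka.clients.producer.Producer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.errors.TopicAuthorizationException;
    import org.junit.Assert;

    import java.util.concurrent.ExecutionException;

    public class ProduceFailSketch {

        // sends one record and asserts that the broker denies the write
        static void produceFail(Producer<String, String> producer, String topic) throws InterruptedException {
            try {
                producer.send(new ProducerRecord<>(topic, "The Message")).get();
                Assert.fail("Should not be able to send message");
            } catch (ExecutionException e) {
                // the authorization failure surfaces as the cause of the
                // ExecutionException thrown by Future.get()
                Assert.assertTrue("Should fail with TopicAuthorizationException",
                        e.getCause() instanceof TopicAuthorizationException);
            }
        }
    }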
+ teamBProducer = newProducer(Common.TEAM_B_CLIENT); + + // + // team-b-client should succeed producing to b_* topic + // + Common.produce(teamBProducer, Common.TOPIC_B); + + // + // team-b-client should fail to produce to x_* topic + // + Common.produceFail(teamBProducer, Common.TOPIC_X); + + + Consumer teamBConsumer = newConsumer(Common.TEAM_B_CLIENT, Common.TOPIC_A); + + // + // team-b-client should fail consuming from a_* topic + // + Common.consumeFail(teamBConsumer, Common.TOPIC_A); + + // Close and re-init consumer + teamBConsumer = newConsumer(Common.TEAM_B_CLIENT, Common.TOPIC_B); + + // + // team-b-client should succeed consuming from b_* topic + // + Common.consume(teamBConsumer, Common.TOPIC_B); + } + + void testTeamAClientPart2() throws Exception { + + // + // team-a-client should succeed producing to existing x_* topic + // + Producer teamAProducer = newProducer(Common.TEAM_A_CLIENT); + + Common.produce(teamAProducer, Common.TOPIC_X); + + // + // team-a-client should fail reading from x_* topic + // + Consumer teamAConsumer = newConsumer(Common.TEAM_A_CLIENT, Common.TOPIC_A); + Common.consumeFail(teamAConsumer, Common.TOPIC_X); + } + + void testTeamBClientPart2() throws Exception { + // + // team-b-client should succeed consuming from x_* topic + // + Consumer teamBConsumer = newConsumer(Common.TEAM_B_CLIENT, Common.TOPIC_B); + Common.consume(teamBConsumer, Common.TOPIC_X); + + + // + // team-b-client should fail producing to x_* topic + // + Producer teamBProducer = newProducer(Common.TEAM_B_CLIENT); + Common.produceFail(teamBProducer, Common.TOPIC_X); + } + + void testUserWithNoPermissions() throws Exception { + // + // User 'zero' has no matching policies, the fetching of grants should return 403 and user should be denied all operations + // + Properties producerProps = buildProducerConfigForAccount(Common.ZERO); + Producer producer = new KafkaProducer<>(producerProps); + + Properties consumerProps = buildConsumerConfigForAccount(Common.ZERO); + Consumer consumer = new KafkaConsumer<>(consumerProps); + + // + // 'zero' should fail producing to x_* topic + // + Common.produceFail(producer, Common.TOPIC_X); + + // + // 'zero' should fail producing to a_* topic + // + Common.produceFail(producer, Common.TOPIC_A); + + // + // 'zero' should fail producing to b_* topic + // + Common.produceFail(producer, Common.TOPIC_B); + + // + // 'zero' should fail producing to non-existing topic + // + Common.produceFail(producer, "non-existing-topic"); + + // + // 'zero' should fail consuming from x_* topic + // + Common.consumeFail(consumer, Common.TOPIC_X); + + // + // 'zero' should fail consuming from a_* topic + // + Common.consumeFail(consumer, Common.TOPIC_A); + + // + // 'zero' should fail consuming from b_* topic + // + Common.consumeFail(consumer, Common.TOPIC_B); + + // + // 'zero' should fail consuming from "non-existing-topic" - which now exists + // + Common.consumeFail(consumer, "non-existing-topic"); + + // check kafka log + List lines = TestUtil.getContainerLogsForString(kafkaContainer, "Saving non-null grants for user: zero"); + Assert.assertEquals("Saved non-null grants", 1, lines.size()); + + lines = TestUtil.getContainerLogsForString(kafkaContainer, "Got grants for 'OAuthKafkaPrincipal(User:zero,"); + Assert.assertTrue("Grants for user are: {}", lines.size() > 0); + + for (String line: lines) { + Assert.assertTrue("Grants for user are: {}", line.contains(": {}")); + } + } +} diff --git 
a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/Common.java b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/Common.java similarity index 79% rename from testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/Common.java rename to testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/Common.java index a8d68364..12799571 100644 --- a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/Common.java +++ b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/Common.java @@ -5,8 +5,10 @@ package io.strimzi.testsuite.oauth.authz; import com.fasterxml.jackson.databind.JsonNode; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import io.strimzi.kafka.oauth.client.ClientConfig; import io.strimzi.kafka.oauth.common.HttpUtil; +import io.strimzi.testsuite.oauth.common.TestUtil; import org.apache.kafka.clients.admin.AdminClient; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerConfig; @@ -17,15 +19,24 @@ import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.acl.AccessControlEntryFilter; +import org.apache.kafka.common.acl.AclBinding; +import org.apache.kafka.common.acl.AclBindingFilter; +import org.apache.kafka.common.acl.AclOperation; +import org.apache.kafka.common.acl.AclPermissionType; import org.apache.kafka.common.errors.TopicAuthorizationException; +import org.apache.kafka.common.resource.ResourcePatternFilter; import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringSerializer; import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.net.URI; import java.time.Duration; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -35,16 +46,20 @@ import static io.strimzi.kafka.oauth.common.OAuthAuthenticator.loginWithClientSecret; import static io.strimzi.kafka.oauth.common.OAuthAuthenticator.urlencode; +@SuppressFBWarnings({"THROWS_METHOD_THROWS_CLAUSE_BASIC_EXCEPTION", "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION"}) public class Common { + private static final Logger log = LoggerFactory.getLogger(Common.class); static final String HOST = "keycloak"; static final String REALM = "kafka-authz"; static final String TOKEN_ENDPOINT_URI = "http://" + HOST + ":8080/auth/realms/" + REALM + "/protocol/openid-connect/token"; + private static final String PLAIN_LISTENER = "kafka:9100"; static final String TEAM_A_CLIENT = "team-a-client"; static final String TEAM_B_CLIENT = "team-b-client"; static final String BOB = "bob"; + static final String ZERO = "zero"; static final String TOPIC_A = "a_messages"; static final String TOPIC_B = "b_messages"; @@ -55,7 +70,7 @@ public class Common { boolean usePlain; - HashMap tokens; + HashMap tokens = new HashMap<>(); Producer teamAProducer; Consumer teamAConsumer; @@ -68,16 +83,23 @@ public class Common { this.usePlain = oauthOverPlain; } - static HashMap authenticateAllActors() throws IOException { + static void produceToTopic(String topic, Properties config) throws Exception { - HashMap tokens = new HashMap<>(); + try (Producer producer = new KafkaProducer<>(config)) { + producer.send(new ProducerRecord<>(topic, "The 
Message")).get(); + log.debug("Produced The Message"); + } + } + + void authenticateAllActors() throws IOException { tokens.put(TEAM_A_CLIENT, loginWithClientSecret(URI.create(TOKEN_ENDPOINT_URI), null, null, TEAM_A_CLIENT, TEAM_A_CLIENT + "-secret", true, null, null).token()); tokens.put(TEAM_B_CLIENT, loginWithClientSecret(URI.create(TOKEN_ENDPOINT_URI), null, null, TEAM_B_CLIENT, TEAM_B_CLIENT + "-secret", true, null, null).token()); tokens.put(BOB, loginWithUsernamePassword(URI.create(TOKEN_ENDPOINT_URI), BOB, BOB + "-password", "kafka-cli")); - return tokens; + tokens.put(ZERO, loginWithUsernamePassword(URI.create(TOKEN_ENDPOINT_URI), + ZERO, ZERO + "-password", "kafka-cli")); } static void consume(Consumer consumer, String topic) { @@ -108,14 +130,15 @@ static void consumeFail(Consumer consumer, String topic) { Assert.fail("Should fail with TopicAuthorizationException"); } catch (TopicAuthorizationException expected) { + // ignored } } - static void produce(Producer producer, String topic) throws Exception { + static void produce(Producer producer, String topic) throws InterruptedException, ExecutionException { producer.send(new ProducerRecord<>(topic, "The Message")).get(); } - static void produceFail(Producer producer, String topic) throws Exception { + static void produceFail(Producer producer, String topic) throws InterruptedException { try { produce(producer, topic); Assert.fail("Should not be able to send message"); @@ -125,7 +148,7 @@ static void produceFail(Producer producer, String topic) throws } } - static String loginWithUsernamePassword(URI tokenEndpointUri, String username, String password, String clientId) throws IOException { + public static String loginWithUsernamePassword(URI tokenEndpointUri, String username, String password, String clientId) throws IOException { String body = "grant_type=password&username=" + urlencode(username) + "&password=" + urlencode(password) + "&client_id=" + urlencode(clientId); @@ -145,6 +168,30 @@ static String loginWithUsernamePassword(URI tokenEndpointUri, String username, S return token.asText(); } + + public static void waitForACLs() throws Exception { + + // Create admin client using user `admin:admin-password` over PLAIN listener (port 9100) + try (AdminClient adminClient = buildAdminClientForPlain(PLAIN_LISTENER, "admin")) { + + TestUtil.waitForCondition(() -> { + try { + Collection result = adminClient.describeAcls(new AclBindingFilter(ResourcePatternFilter.ANY, + new AccessControlEntryFilter("User:alice", null, AclOperation.IDEMPOTENT_WRITE, AclPermissionType.ALLOW))).values().get(); + for (AclBinding acl : result) { + if (AclOperation.IDEMPOTENT_WRITE.equals(acl.entry().operation())) { + return true; + } + } + return false; + + } catch (Throwable e) { + throw new RuntimeException("ACLs for User:alice could not be retrieved: ", e); + } + }, 500, 210); + } + } + Producer getProducer(final String name) { return recycleProducer(name, true); } @@ -254,7 +301,7 @@ Map buildAuthConfigForPlain(String name) { : buildAuthConfigForPlain(name, "$accessToken:" + tokens.get(name)); } - Map buildAuthConfigForPlain(String clientId, String secret) { + static Map buildAuthConfigForPlain(String clientId, String secret) { Map config = new HashMap<>(); config.put("username", clientId); config.put("password", secret); @@ -273,7 +320,7 @@ static String getJaasConfigOptionsString(Map options) { return sb.toString(); } - static Properties buildProducerConfigOAuthBearer(String kafkaBootstrap, Map oauthConfig) { + public static Properties 
buildProducerConfigOAuthBearer(String kafkaBootstrap, Map oauthConfig) { Properties p = buildCommonConfigOAuthBearer(oauthConfig); setCommonProducerProperties(kafkaBootstrap, p); return p; @@ -313,13 +360,13 @@ Properties buildProducerConfig(String kafkaBootstrap, boolean usePlain, String c buildProducerConfigOAuthBearer(kafkaBootstrap, buildAuthConfigForOAuthBearer(clientId)); } - static Properties buildProducerConfigPlain(String kafkaBootstrap, Map plainConfig) { + public static Properties buildProducerConfigPlain(String kafkaBootstrap, Map plainConfig) { Properties p = buildCommonConfigPlain(plainConfig); setCommonProducerProperties(kafkaBootstrap, p); return p; } - static Properties buildProducerConfigScram(String kafkaBootstrap, Map scramConfig) { + public static Properties buildProducerConfigScram(String kafkaBootstrap, Map scramConfig) { Properties p = buildCommonConfigScram(scramConfig); setCommonProducerProperties(kafkaBootstrap, p); return p; @@ -360,11 +407,16 @@ static Properties buildCommonConfigScram(Map scramConfig) { return p; } + public static AdminClient buildAdminClientForPlain(String kafkaBootstrap, String user) { + Properties adminProps = buildProducerConfigPlain(kafkaBootstrap, buildAuthConfigForPlain(user, user + "-password")); + return AdminClient.create(adminProps); + } + void cleanup() { Properties bobAdminProps = buildAdminConfigForAccount(BOB); - AdminClient admin = AdminClient.create(bobAdminProps); - - admin.deleteTopics(Arrays.asList(TOPIC_A, TOPIC_B, TOPIC_X, "non-existing-topic")); - admin.deleteConsumerGroups(Arrays.asList(groupFor(TOPIC_A), groupFor(TOPIC_B), groupFor(TOPIC_X), groupFor("non-existing-topic"))); + try (AdminClient admin = AdminClient.create(bobAdminProps)) { + admin.deleteTopics(Arrays.asList(TOPIC_A, TOPIC_B, TOPIC_X, "non-existing-topic")); + admin.deleteConsumerGroups(Arrays.asList(groupFor(TOPIC_A), groupFor(TOPIC_B), groupFor(TOPIC_X), groupFor("non-existing-topic"))); + } } } diff --git a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/ConfigurationTest.java b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/ConfigurationTest.java similarity index 97% rename from testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/ConfigurationTest.java rename to testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/ConfigurationTest.java index a1ac57b7..93fc1847 100644 --- a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/ConfigurationTest.java +++ b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/ConfigurationTest.java @@ -14,7 +14,7 @@ public class ConfigurationTest { private final String kafkaContainer; - ConfigurationTest(String kafkaContainer) { + public ConfigurationTest(String kafkaContainer) { this.kafkaContainer = kafkaContainer; } diff --git a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/FloodTest.java b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/FloodTest.java similarity index 87% rename from testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/FloodTest.java rename to testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/FloodTest.java index 66847432..2c70bc08 100644 --- a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/FloodTest.java +++ 
b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/FloodTest.java @@ -4,6 +4,7 @@ */ package io.strimzi.testsuite.oauth.authz; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.KafkaConsumer; @@ -20,29 +21,25 @@ import java.net.URI; import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Properties; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; import static io.strimzi.kafka.oauth.common.OAuthAuthenticator.loginWithClientSecret; -import static io.strimzi.testsuite.oauth.common.TestUtil.getContainerLogsForString; +@SuppressFBWarnings({"THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", "THROWS_METHOD_THROWS_CLAUSE_THROWABLE"}) public class FloodTest extends Common { private static final Logger log = LoggerFactory.getLogger(FloodTest.class); - private final ArrayList threads = new ArrayList<>(); + private final ArrayList threads = new ArrayList<>(); - private static AtomicInteger startedCount; + private static final AtomicInteger STARTED_COUNT = new AtomicInteger(0); static int sendLimit = 1; - private final String kafkaContainer; - - FloodTest(String kafkaContainer, String kafkaBootstrap, boolean oauthOverPlain) { + public FloodTest(String kafkaBootstrap, boolean oauthOverPlain) { super(kafkaBootstrap, oauthOverPlain); - this.kafkaContainer = kafkaContainer; } public void doTest() throws IOException { @@ -52,7 +49,7 @@ public void doTest() throws IOException { /** * This test uses the Kafka listener configured with both OAUTHBEARER and PLAIN. - * + *

      * It connects concurrently with multiple producers with different client IDs using the PLAIN mechanism, testing the OAuth over PLAIN functionality. * With KeycloakRBACAuthorizer configured, any mixup of the credentials between different clients will be caught as * AuthorizationException would be thrown trying to write to the topic if the user context was mismatched. @@ -100,7 +97,11 @@ void clientCredentialsWithFloodTest() throws IOException { startThreads(); // Wait for all threads to finish - joinThreads(); + try { + joinThreads(); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted"); + } // Check for errors checkExceptions(); @@ -127,7 +128,11 @@ void clientCredentialsWithFloodTest() throws IOException { startThreads(); // Wait for all threads to finish - joinThreads(); + try { + joinThreads(); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted"); + } // Check for errors checkExceptions(); @@ -137,22 +142,12 @@ void clientCredentialsWithFloodTest() throws IOException { } } - private int currentFoundExistingGrantsLogCount() { - List lines = getContainerLogsForString(kafkaContainer, "Found existing grants for the token on another session"); - return lines.size(); - } - - private int currentSemaphoreBlockLogCount() { - List lines = getContainerLogsForString(kafkaContainer, "Waiting on another thread to get grants"); - return lines.size(); - } - private void sendSingleMessage(String clientId, String secret, String topic) throws ExecutionException, InterruptedException { Properties props = buildProducerConfig(kafkaBootstrap, usePlain, clientId, secret); - KafkaProducer producer = new KafkaProducer<>(props); - - producer.send(new ProducerRecord<>(topic, "Message 0")) - .get(); + try (KafkaProducer producer = new KafkaProducer<>(props)) { + producer.send(new ProducerRecord<>(topic, "Message 0")) + .get(); + } } private String groupForConsumer(int index) { @@ -173,26 +168,21 @@ public void clearThreads() { } public void startThreads() { - startedCount = new AtomicInteger(0); for (Thread t : threads) { t.start(); } } - public void joinThreads() { + public void joinThreads() throws InterruptedException { for (Thread t : threads) { - try { - t.join(); - } catch (InterruptedException e) { - throw new RuntimeException("Interrupted - exiting ..."); - } + t.join(); } } public void checkExceptions() { try { - for (Thread t : threads) { - ((ClientJob) t).checkException(); + for (ClientJob t : threads) { + t.checkException(); } } catch (RuntimeException e) { throw e; @@ -253,12 +243,12 @@ private void initConsumer() { } public void run() { - int started = startedCount.addAndGet(1); + int started = STARTED_COUNT.addAndGet(1); try { while (started < threads.size()) { Thread.sleep(10); - started = startedCount.get(); + started = STARTED_COUNT.get(); } for (int i = 0; i < sendLimit; i++) { @@ -313,13 +303,13 @@ private void initProducer() { } public void run() { - int started = startedCount.addAndGet(1); + int started = STARTED_COUNT.addAndGet(1); try { while (started < threads.size()) { Thread.sleep(10); - started = startedCount.get(); + started = STARTED_COUNT.get(); } for (int i = 0; i < sendLimit; i++) { diff --git a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/MetricsTest.java b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/MetricsTest.java similarity index 63% rename from testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/MetricsTest.java 
rename to testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/MetricsTest.java index 27e0b6a8..c74579d9 100644 --- a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/MetricsTest.java +++ b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/MetricsTest.java @@ -4,6 +4,7 @@ */ package io.strimzi.testsuite.oauth.authz; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import io.strimzi.testsuite.oauth.common.TestMetrics; import org.junit.Assert; @@ -12,20 +13,20 @@ import static io.strimzi.testsuite.oauth.common.TestMetrics.getPrometheusMetrics; +@SuppressFBWarnings("THROWS_METHOD_THROWS_CLAUSE_BASIC_EXCEPTION") public class MetricsTest { - public static void doTest() throws Exception { + private static final String AUTH_HOST_PORT = "keycloak:8080"; + private static final String REALM = "kafka-authz"; + private static final String JWKS_PATH = "/auth/realms/" + REALM + "/protocol/openid-connect/certs"; - final String authHostPort = "keycloak:8080"; - final String realm = "kafka-authz"; - final String jwksPath = "/auth/realms/" + realm + "/protocol/openid-connect/certs"; - final String tokenPath = "/auth/realms/" + realm + "/protocol/openid-connect/token"; + public static void doTest() throws Exception { TestMetrics metrics = getPrometheusMetrics(URI.create("http://kafka:9404/metrics")); - BigDecimal value = metrics.getValueSum("strimzi_oauth_http_requests_count", "kind", "jwks", "host", authHostPort, "path", jwksPath, "outcome", "success"); + BigDecimal value = metrics.getValueSum("strimzi_oauth_http_requests_count", "kind", "jwks", "host", AUTH_HOST_PORT, "path", JWKS_PATH, "outcome", "success"); Assert.assertTrue("strimzi_oauth_http_requests_count for jwks > 0", value.doubleValue() > 0.0); - value = metrics.getValueSum("strimzi_oauth_http_requests_totaltimems", "kind", "jwks", "host", authHostPort, "path", jwksPath, "outcome", "success"); + value = metrics.getValueSum("strimzi_oauth_http_requests_totaltimems", "kind", "jwks", "host", AUTH_HOST_PORT, "path", JWKS_PATH, "outcome", "success"); Assert.assertTrue("strimzi_oauth_http_requests_totaltimems for jwks > 0.0", value.doubleValue() > 0.0); // Accross all the listeners there should only be 2 client authentication requests - those for inter-broker connection on JWT listener @@ -34,15 +35,22 @@ public static void doTest() throws Exception { value = metrics.getValueSum("strimzi_oauth_authentication_requests_totaltimems", "kind", "client-auth", "outcome", "success"); Assert.assertTrue("strimzi_oauth_authentication_requests_totaltimems for client-auth > 0.0", value.doubleValue() > 0.0); + } + + public static void doTestValidationAndAuthorization() throws Exception { + + final String tokenPath = "/auth/realms/" + REALM + "/protocol/openid-connect/token"; + + TestMetrics metrics = getPrometheusMetrics(URI.create("http://kafka:9404/metrics")); - // Inter-broker auth triggered the only successful validation request - value = metrics.getValueSum("strimzi_oauth_validation_requests_count", "kind", "jwks", "mechanism", "OAUTHBEARER", "outcome", "success"); - Assert.assertEquals("strimzi_oauth_validation_requests_count for jwks == 1", 1, value.intValue()); + BigDecimal value = metrics.getValueSum("strimzi_oauth_validation_requests_count", "kind", "jwks", "mechanism", "OAUTHBEARER", "outcome", "success"); + Assert.assertTrue("strimzi_oauth_validation_requests_count for jwks > 0", value.intValue() > 0); value = 
metrics.getValueSum("strimzi_oauth_validation_requests_totaltimems", "kind", "jwks", "mechanism", "OAUTHBEARER", "outcome", "success"); Assert.assertTrue("strimzi_oauth_validation_requests_totaltimems for jwks > 0.0", value.doubleValue() > 0.0); - value = metrics.getValueSum("strimzi_oauth_http_requests_count", "kind", "keycloak-authorization", "host", authHostPort, "path", tokenPath, "outcome", "error"); + // No 403 (no grants) responses in this test + value = metrics.getValueSum("strimzi_oauth_http_requests_count", "kind", "keycloak-authorization", "host", AUTH_HOST_PORT, "path", tokenPath, "outcome", "success"); Assert.assertTrue("strimzi_oauth_http_requests_count for keycloak-authorization > 0.0", value.doubleValue() > 0.0); } } diff --git a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/MultiSaslTest.java b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/MultiSaslTest.java similarity index 58% rename from testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/MultiSaslTest.java rename to testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/MultiSaslTest.java index 41e0c9da..41a7f31b 100644 --- a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/MultiSaslTest.java +++ b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/MultiSaslTest.java @@ -4,14 +4,12 @@ */ package io.strimzi.testsuite.oauth.authz; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import io.strimzi.kafka.oauth.client.ClientConfig; import io.strimzi.testsuite.oauth.common.TestMetrics; -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.AuthorizationException; import org.junit.Assert; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; import java.math.BigDecimal; @@ -20,31 +18,31 @@ import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.concurrent.ExecutionException; import static io.strimzi.testsuite.oauth.authz.Common.buildProducerConfigOAuthBearer; import static io.strimzi.testsuite.oauth.authz.Common.buildProducerConfigPlain; -import static io.strimzi.testsuite.oauth.authz.Common.buildProducerConfigScram; +import static io.strimzi.testsuite.oauth.authz.Common.produceToTopic; import static io.strimzi.testsuite.oauth.common.TestMetrics.getPrometheusMetrics; import static io.strimzi.testsuite.oauth.common.TestUtil.getContainerLogsForString; +@SuppressFBWarnings("THROWS_METHOD_THROWS_CLAUSE_BASIC_EXCEPTION") public class MultiSaslTest { - private static final Logger log = LoggerFactory.getLogger(MultiSaslTest.class); - private static final String PLAIN_LISTENER = "kafka:9100"; - private static final String SCRAM_LISTENER = "kafka:9101"; + private static final String JWT_LISTENER = "kafka:9092"; private static final String JWTPLAIN_LISTENER = "kafka:9094"; private final String kafkaContainer; - MultiSaslTest(String kafkaContainer) { + public MultiSaslTest(String kafkaContainer) { this.kafkaContainer = kafkaContainer; } public void doTest() throws Exception { - // bobby:bobby-secret + // bobby:bobby-secret is defined in docker-compose.yaml in the PLAIN listener configuration (port 9100) String username = "bobby"; String password = "bobby-secret"; @@ -61,78 +59,25 
@@ public void doTest() throws Exception { try { produceToTopic("KeycloakAuthorizationTest-multiSaslTest-plain-denied", producerProps); Assert.fail("Should have failed"); - } catch (Exception ignored) { - } - - // Producing to SCRAM listener using SASL_SCRAM-SHA-512 should fail. - // User 'bobby' has not been configured for SCRAM in 'docker/kafka/scripts/start.sh' - producerProps = producerConfigScram(SCRAM_LISTENER, username, password); - try { - produceToTopic("KeycloakAuthorizationTest-multiSaslTest-scram", producerProps); - Assert.fail("Should have failed"); - } catch (Exception ignored) { - } - - - // alice:alice-secret - username = "alice"; - password = "alice-secret"; - - // Producing to PLAIN listener using SASL/PLAIN should fail. - // User 'alice' has not been configured for PLAIN in PLAIN listener configuration in 'docker-compose.yml' - producerProps = producerConfigPlain(PLAIN_LISTENER, username, password); - try { - produceToTopic("KeycloakAuthorizationTest-multiSaslTest-plain", producerProps); - Assert.fail("Should have failed"); - } catch (Exception ignored) { - } - - // Producing to SCRAM listener using SASL_SCRAM-SHA-512 should succeed. - // The necessary ACLs have been added by 'docker/kafka-acls/scripts/add-acls.sh' - producerProps = producerConfigScram(SCRAM_LISTENER, username, password); - produceToTopic("KeycloakAuthorizationTest-multiSaslTest-scram", producerProps); - try { - produceToTopic("KeycloakAuthorizationTest-multiSaslTest-scram-denied", producerProps); - Assert.fail("Should have failed"); - } catch (Exception ignored) { - } - - // OAuth authentication should fail - try { - Common.loginWithUsernamePassword( - URI.create("http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token"), - username, password, "kafka-cli"); - - Assert.fail("Should have failed"); - } catch (Exception ignored) { + } catch (ExecutionException e) { + Assert.assertTrue("Instance of authorization exception", e.getCause() instanceof AuthorizationException); } - // alice:alice-password username = "alice"; password = "alice-password"; // Producing to PLAIN listener using SASL/PLAIN should fail. - // User 'alice' was not configured for PLAIN in 'docker-compose.yml' + // User 'alice' was not configured in PLAIN listener jaas configuration (port 9100) in 'docker-compose.yml' producerProps = producerConfigPlain(PLAIN_LISTENER, username, password); try { produceToTopic("KeycloakAuthorizationTest-multiSaslTest-plain", producerProps); Assert.fail("Should have failed"); - } catch (Exception ignored) { - } - - // Producing to SCRAM listener using SASL_SCRAM-SHA-512 should fail. 
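One pattern worth calling out in the rewritten MultiSaslTest: instead of catching a blanket Exception, the test now asserts on the cause of the ExecutionException raised by Future.get(), which is how both failure modes reach the client. A condensed, self-contained illustration (class, method and topic names here are placeholders):

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.Producer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.errors.AuthenticationException;
    import org.apache.kafka.common.errors.AuthorizationException;

    import java.util.Properties;
    import java.util.concurrent.ExecutionException;

    public class FailureCauseSketch {

        static void classifyFailure(Properties producerProps, String topic) throws InterruptedException {
            try (Producer<String, String> producer = new KafkaProducer<>(producerProps)) {
                producer.send(new ProducerRecord<>(topic, "The Message")).get();
                System.out.println("produce succeeded");
            } catch (ExecutionException e) {
                Throwable cause = e.getCause();
                if (cause instanceof AuthenticationException) {
                    // SASL handshake rejected: wrong or unknown credentials on this listener
                    System.out.println("authentication failed");
                } else if (cause instanceof AuthorizationException) {
                    // authenticated, but no ACL or Keycloak grant permits the write
                    System.out.println("authorization denied");
                } else {
                    throw new IllegalStateException("Unexpected failure", cause);
                }
            }
        }
    }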
- // User 'alice' was configured for SASL in 'docker/kafka/scripts/start.sh' but with a different password - producerProps = producerConfigScram(SCRAM_LISTENER, username, password); - try { - produceToTopic("KeycloakAuthorizationTest-multiSaslTest-scram", producerProps); - Assert.fail("Should have failed"); - } catch (Exception ignored) { + } catch (ExecutionException e) { + Assert.assertTrue("Instance of authentication exception", e.getCause() instanceof AuthenticationException); } - // Test the grants reuse feature int fetchGrantsCount = currentFetchGrantsLogCount(); - checkAuthorizationGrantsReuse(0); // Producing to JWT listener using SASL/OAUTHBEARER using access token should succeed String accessToken = Common.loginWithUsernamePassword( @@ -141,8 +86,6 @@ public void doTest() throws Exception { producerProps = producerConfigOAuthBearerAccessToken(JWT_LISTENER, accessToken); produceToTopic("KeycloakAuthorizationTest-multiSaslTest-oauthbearer", producerProps); - // Test the grants reuse feature - checkAuthorizationGrantsReuse(2); checkGrantsFetchCountDiff(fetchGrantsCount); // producing to JWTPLAIN listener using SASL/PLAIN using $accessToken should succeed @@ -157,16 +100,6 @@ public void doTest() throws Exception { checkGrantsMetrics(authHostPort, tokenPath); } - private void checkAuthorizationGrantsReuse(int numberOfReuses) { - List lines = getContainerLogsForString(kafkaContainer, "Found existing grants for the token on another session"); - - if (numberOfReuses == 0) { - Assert.assertEquals("There should be no reuse of existing grants in Kafka log yet", 0, lines.size()); - } else { - Assert.assertTrue("There should be " + numberOfReuses + " reuses of existing grants in Kafka log", lines.size() >= numberOfReuses); - } - } - private int currentFetchGrantsLogCount() { List lines = getContainerLogsForString(kafkaContainer, "Fetching grants from Keycloak"); return lines.size(); @@ -185,11 +118,14 @@ private static void checkGrantsMetrics(String authHostPort, String tokenPath) th value = metrics.getValueSum("strimzi_oauth_http_requests_totaltimems", "kind", "keycloak-authorization", "host", authHostPort, "path", tokenPath, "outcome", "success"); Assert.assertTrue("strimzi_oauth_http_requests_totaltimems for keycloak-authorization > 0", value.doubleValue() > 0.0); - value = metrics.getValueSum("strimzi_oauth_http_requests_count", "kind", "keycloak-authorization", "host", authHostPort, "path", tokenPath, "outcome", "error", "status", "403"); - Assert.assertTrue("strimzi_oauth_http_requests_count with no-grants for keycloak-authorization > 0", value.intValue() > 0); + // There are 403 responses in Zookeeper mode, but not in KRaft mode + // Apparently the inter-broker session to JWT listener is not attempted in KRaft mode + + //value = metrics.getValueSum("strimzi_oauth_http_requests_count", "kind", "keycloak-authorization", "host", authHostPort, "path", tokenPath, "outcome", "error", "status", "403"); + //Assert.assertTrue("strimzi_oauth_http_requests_count with no-grants for keycloak-authorization > 0", value.intValue() > 0); - value = metrics.getValueSum("strimzi_oauth_http_requests_totaltimems", "kind", "keycloak-authorization", "host", authHostPort, "path", tokenPath, "outcome", "error", "status", "403"); - Assert.assertTrue("strimzi_oauth_http_requests_totaltimems with no-grants for keycloak-authorization > 0", value.doubleValue() > 0.0); + //value = metrics.getValueSum("strimzi_oauth_http_requests_totaltimems", "kind", "keycloak-authorization", "host", authHostPort, "path", tokenPath, 
"outcome", "error", "status", "403"); + //Assert.assertTrue("strimzi_oauth_http_requests_totaltimems with no-grants for keycloak-authorization > 0", value.doubleValue() > 0.0); } private static void checkAuthorizationRequestsMetrics(String authHostPort, String tokenPath) throws IOException { @@ -208,14 +144,6 @@ private static void checkAuthorizationRequestsMetrics(String authHostPort, Strin Assert.assertEquals("strimzi_oauth_authorization_requests_totaltimems for failed keycloak-authorization == 0", 0.0, value.doubleValue(), 0.0); } - private static Properties producerConfigScram(String kafkaBootstrap, String username, String password) { - Map scramConfig = new HashMap<>(); - scramConfig.put("username", username); - scramConfig.put("password", password); - - return buildProducerConfigScram(kafkaBootstrap, scramConfig); - } - private static Properties producerConfigPlain(String kafkaBootstrap, String username, String password) { Map scramConfig = new HashMap<>(); scramConfig.put("username", username); @@ -231,12 +159,4 @@ private static Properties producerConfigOAuthBearerAccessToken(String kafkaBoots return buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); } - - private static void produceToTopic(String topic, Properties config) throws Exception { - - Producer producer = new KafkaProducer<>(config); - - producer.send(new ProducerRecord<>(topic, "The Message")).get(); - log.debug("Produced The Message"); - } } diff --git a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/OAuthOverPlainTest.java b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/OAuthOverPlainTest.java similarity index 60% rename from testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/OAuthOverPlainTest.java rename to testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/OAuthOverPlainTest.java index f7bb5275..0e2f0c3c 100644 --- a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/OAuthOverPlainTest.java +++ b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/OAuthOverPlainTest.java @@ -6,7 +6,7 @@ public class OAuthOverPlainTest extends BasicTest { - public OAuthOverPlainTest(String kafkaBootstrap, boolean oauthOverPlain) { - super(kafkaBootstrap, oauthOverPlain); + public OAuthOverPlainTest(String kafkaContainer, String kafkaBootstrap, boolean oauthOverPlain) { + super(kafkaContainer, kafkaBootstrap, oauthOverPlain); } } diff --git a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/RefreshTest.java b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/RefreshTest.java similarity index 98% rename from testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/RefreshTest.java rename to testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/RefreshTest.java index 4d33bbb4..df3e6b3c 100644 --- a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/RefreshTest.java +++ b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/RefreshTest.java @@ -5,6 +5,7 @@ package io.strimzi.testsuite.oauth.authz; import com.fasterxml.jackson.databind.JsonNode; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import io.strimzi.kafka.oauth.common.HttpUtil; import org.apache.kafka.clients.producer.Producer; import org.junit.Assert; @@ -15,15 +16,17 @@ import java.util.Iterator; import java.util.Map; 
+@SuppressFBWarnings("THROWS_METHOD_THROWS_CLAUSE_BASIC_EXCEPTION") public class RefreshTest extends BasicTest { - RefreshTest(String kafkaBootstrap, boolean oauthOverPlain) { - super(kafkaBootstrap, oauthOverPlain); + public RefreshTest(String kafkaContainer, String kafkaBootstrap, boolean oauthOverPlain) { + super(kafkaContainer, kafkaBootstrap, oauthOverPlain); } + @Override public void doTest() throws Exception { - tokens = authenticateAllActors(); + authenticateAllActors(); testTeamAClientPart1(); diff --git a/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/ScramTest.java b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/ScramTest.java new file mode 100644 index 00000000..604b70e4 --- /dev/null +++ b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/ScramTest.java @@ -0,0 +1,81 @@ +/* + * Copyright 2017-2023, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.testsuite.oauth.authz; + +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import io.strimzi.kafka.oauth.common.HttpException; +import org.apache.kafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.AuthorizationException; +import org.junit.Assert; + +import java.net.URI; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.ExecutionException; + +import static io.strimzi.testsuite.oauth.authz.Common.buildProducerConfigScram; +import static io.strimzi.testsuite.oauth.authz.Common.produceToTopic; + +@SuppressFBWarnings("THROWS_METHOD_THROWS_CLAUSE_BASIC_EXCEPTION") +public class ScramTest { + + private static final String SCRAM_LISTENER = "kafka:9101"; + + private static Properties producerConfigScram(String kafkaBootstrap, String username, String password) { + Map scramConfig = new HashMap<>(); + scramConfig.put("username", username); + scramConfig.put("password", password); + + return buildProducerConfigScram(kafkaBootstrap, scramConfig); + } + + public void doTest() throws Exception { + testScramAuthenticatedSessions(); + } + + void testScramAuthenticatedSessions() throws Exception { + // bobby:bobby-secret is defined in docker-compose.yaml in the PLAIN listener configuration (port 9100) + String username = "bobby"; + String password = "bobby-secret"; + + // Producing to SCRAM listener using SASL_SCRAM-SHA-512 should fail. + // User 'bobby' has not been configured for SCRAM in 'docker/kafka/scripts/start.sh' + Properties producerProps = producerConfigScram(SCRAM_LISTENER, username, password); + try { + produceToTopic("KeycloakAuthorizationTest-multiSaslTest-scram", producerProps); + Assert.fail("Should have failed"); + } catch (ExecutionException e) { + Assert.assertTrue("Instance of authentication exception", e.getCause() instanceof AuthenticationException); + } + + // alice:alice-secret (user 'alice' has been configured for SCRAM in 'docker/kafka/scripts/start.sh') + username = "alice"; + password = "alice-secret"; + + // Producing to SCRAM listener using SASL_SCRAM-SHA-512 should succeed for KeycloakAuthorizationTest-multiSaslTest-scram. 
+ // User 'alice' was configured for SASL SCRAM in 'docker/kafka/scripts/start.sh' + // The necessary ACLs have been added by 'docker/kafka-acls/scripts/add-acls.sh' + producerProps = producerConfigScram(SCRAM_LISTENER, username, password); + produceToTopic("KeycloakAuthorizationTest-multiSaslTest-scram", producerProps); + try { + produceToTopic("KeycloakAuthorizationTest-multiSaslTest-scram-denied", producerProps); + Assert.fail("Should have failed"); + } catch (ExecutionException e) { + Assert.assertTrue("Instance of authorization exception", e.getCause() instanceof AuthorizationException); + } + + // OAuth authentication using SCRAM password should fail + try { + Common.loginWithUsernamePassword( + URI.create("http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token"), + username, password, "kafka-cli"); + + Assert.fail("Should have failed"); + } catch (HttpException e) { + Assert.assertEquals("Status 401", 401, e.getStatus()); + } + } +} diff --git a/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/SingletonTest.java b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/SingletonTest.java new file mode 100644 index 00000000..c2a9eb37 --- /dev/null +++ b/testsuite/keycloak-authz-tests/src/main/java/io/strimzi/testsuite/oauth/authz/SingletonTest.java @@ -0,0 +1,39 @@ +/* + * Copyright 2017-2023, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.testsuite.oauth.authz; + +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import io.strimzi.testsuite.oauth.common.ContainerLogLineReader; +import org.junit.Assert; + +import java.util.List; +import java.util.stream.Collectors; + +@SuppressFBWarnings("THROWS_METHOD_THROWS_CLAUSE_BASIC_EXCEPTION") +public class SingletonTest { + + private final String kafkaContainer; + + public SingletonTest(String kafkaContainer) { + this.kafkaContainer = kafkaContainer; + } + + /** + * Ensure that multiple instantiated KeycloakAuthorizers share a single instance of KeycloakRBACAuthorizer. + * + * @throws Exception If any error occurs + */ + public void doSingletonTest(int keycloakAuthorizersCount) throws Exception { + + ContainerLogLineReader logReader = new ContainerLogLineReader(kafkaContainer); + + List<String> lines = logReader.readNext(); + List<String> keycloakAuthorizerLines = lines.stream().filter(line -> line.contains("Configured KeycloakAuthorizer@")).collect(Collectors.toList()); + List<String> keycloakRBACAuthorizerLines = lines.stream().filter(line -> line.contains("Configured KeycloakRBACAuthorizer@")).collect(Collectors.toList()); + + Assert.assertEquals("Configured KeycloakAuthorizer", keycloakAuthorizersCount, keycloakAuthorizerLines.size()); + Assert.assertEquals("Configured KeycloakRBACAuthorizer", 1, keycloakRBACAuthorizerLines.size()); + } +} diff --git a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/BasicTest.java b/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/BasicTest.java deleted file mode 100644 index 51f936d5..00000000 --- a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/BasicTest.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Copyright 2017-2020, Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */ -package io.strimzi.testsuite.oauth.authz; - -import org.apache.kafka.clients.admin.AdminClient; -import org.apache.kafka.clients.admin.NewTopic; -import org.apache.kafka.clients.consumer.Consumer; -import org.apache.kafka.clients.consumer.KafkaConsumer; -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.Producer; - -import java.util.Properties; - -import static java.util.Collections.singletonList; - - -public class BasicTest extends Common { - - public BasicTest(String kafkaBootstrap, boolean oauthOverPlain) { - super(kafkaBootstrap, oauthOverPlain); - } - - public void doTest() throws Exception { - - tokens = authenticateAllActors(); - - testTeamAClientPart1(); - - testTeamBClientPart1(); - - createTopicAsClusterManager(); - - testTeamAClientPart2(); - - testTeamBClientPart2(); - - testClusterManager(); - - cleanup(); - } - - void createTopicAsClusterManager() throws Exception { - - Properties bobAdminProps = buildAdminConfigForAccount(BOB); - AdminClient admin = AdminClient.create(bobAdminProps); - - // - // Create x_* topic - // - admin.createTopics(singletonList(new NewTopic(TOPIC_X, 1, (short) 1))).all().get(); - } - - void testClusterManager() throws Exception { - - Properties bobAdminProps = buildProducerConfigForAccount(BOB); - Producer producer = new KafkaProducer<>(bobAdminProps); - - Properties consumerProps = buildConsumerConfigForAccount(BOB); - Consumer consumer = new KafkaConsumer<>(consumerProps); - - // - // bob should succeed producing to x_* topic - // - produce(producer, TOPIC_X); - - // - // bob should succeed producing to a_* topic - // - produce(producer, TOPIC_A); - - // - // bob should succeed producing to b_* topic - // - produce(producer, TOPIC_B); - - // - // bob should succeed producing to non-existing topic - // - produce(producer, "non-existing-topic"); - - // - // bob should succeed consuming from x_* topic - // - consume(consumer, TOPIC_X); - - // - // bob should succeed consuming from a_* topic - // - consume(consumer, TOPIC_A); - - // - // bob should succeed consuming from b_* topic - // - consume(consumer, TOPIC_B); - - // - // bob should succeed consuming from "non-existing-topic" - which now exists - // - consume(consumer, "non-existing-topic"); - } - - void testTeamAClientPart1() throws Exception { - - Producer teamAProducer = getProducer(TEAM_A_CLIENT); - - // - // team-a-client should fail to produce to b_* topic - // - produceFail(teamAProducer, TOPIC_B); - - // Re-init producer because message to topicB is stuck in the queue, and any subsequent message to another queue - // won't be handled until first message makes it through. 
- teamAProducer = newProducer(TEAM_A_CLIENT); - - // - // team-a-client should succeed producing to a_* topic - // - produce(teamAProducer, TOPIC_A); - - // - // team-a-client should also fail producing to non-existing x_* topic (fails to create it) - // - produceFail(teamAProducer, TOPIC_X); - - Consumer teamAConsumer = newConsumer(TEAM_A_CLIENT, TOPIC_B); - - // - // team-a-client should fail consuming from b_* topic - // - consumeFail(teamAConsumer, TOPIC_B); - - - // Close and re-init consumer - teamAConsumer = newConsumer(TEAM_A_CLIENT, TOPIC_A); - - // - // team-a-client should succeed consuming from a_* topic - // - consume(teamAConsumer, TOPIC_A); - - // - // team-a-client should fail consuming from x_* topic - it doesn't exist - // - consumeFail(teamAConsumer, TOPIC_X); - } - - void testTeamBClientPart1() throws Exception { - - Producer teamBProducer = getProducer(TEAM_B_CLIENT); - - // - // team-b-client should fail to produce to a_* topic - // - produceFail(teamBProducer, TOPIC_A); - - // Re-init producer because message to topicA is stuck in the queue, and any subsequent message to another queue - // won't be handled until first message makes it through. - teamBProducer = newProducer(TEAM_B_CLIENT); - - // - // team-b-client should succeed producing to b_* topic - // - produce(teamBProducer, TOPIC_B); - - // - // team-b-client should fail to produce to x_* topic - // - produceFail(teamBProducer, TOPIC_X); - - - Consumer teamBConsumer = newConsumer(TEAM_B_CLIENT, TOPIC_A); - - // - // team-b-client should fail consuming from a_* topic - // - consumeFail(teamBConsumer, TOPIC_A); - - // Close and re-init consumer - teamBConsumer = newConsumer(TEAM_B_CLIENT, TOPIC_B); - - // - // team-b-client should succeed consuming from b_* topic - // - consume(teamBConsumer, TOPIC_B); - } - - void testTeamAClientPart2() throws Exception { - - // - // team-a-client should succeed producing to existing x_* topic - // - Producer teamAProducer = newProducer(TEAM_A_CLIENT); - - produce(teamAProducer, TOPIC_X); - - // - // team-a-client should fail reading from x_* topic - // - Consumer teamAConsumer = newConsumer(TEAM_A_CLIENT, TOPIC_A); - consumeFail(teamAConsumer, TOPIC_X); - } - - void testTeamBClientPart2() throws Exception { - // - // team-b-client should succeed consuming from x_* topic - // - Consumer teamBConsumer = newConsumer(TEAM_B_CLIENT, TOPIC_B); - consume(teamBConsumer, TOPIC_X); - - - // - // team-b-client should fail producing to x_* topic - // - Producer teamBProducer = newProducer(TEAM_B_CLIENT); - produceFail(teamBProducer, TOPIC_X); - } -} diff --git a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/KeycloakAuthorizationTests.java b/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/KeycloakAuthorizationTests.java index e0a190e7..36907e06 100644 --- a/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/KeycloakAuthorizationTests.java +++ b/testsuite/keycloak-authz-tests/src/test/java/io/strimzi/testsuite/oauth/authz/KeycloakAuthorizationTests.java @@ -17,12 +17,15 @@ import java.io.File; import java.time.Duration; +import static io.strimzi.testsuite.oauth.authz.Common.waitForACLs; +import static io.strimzi.testsuite.oauth.common.TestUtil.logStart; + /** - * Tests for OAuth authentication using Keycloak + Keycloak Authorization Services based authorization - * + * Tests for OAuth authentication using Keycloak + Keycloak Authorization Services based authorization when KeycloakRBACAuthorizer is 
configured on the Kafka broker running in Zookeeper mode. + * <p>
* This test assumes there are multiple listeners configured with OAUTHBEARER or PLAIN support, but each configured differently * - configured with different options, or different realm. - * + * <p>
* There is KeycloakRBACAuthorizer configured on the Kafka broker. */ public class KeycloakAuthorizationTests { @@ -33,13 +36,10 @@ public class KeycloakAuthorizationTests { .withServices("keycloak", "zookeeper", "kafka", "kafka-acls") // ensure kafka has started .waitingFor("kafka", Wait.forLogMessage(".*started \\(kafka.server.KafkaServer\\).*", 1) - .withStartupTimeout(Duration.ofSeconds(180))) + .withStartupTimeout(Duration.ofSeconds(120))); + // ensure ACLs for user 'alice' have been added - .waitingFor("kafka", Wait.forLogMessage(".*User:alice has ALLOW permission for operations: IDEMPOTENT_WRITE.*", 1) - .withStartupTimeout(Duration.ofSeconds(210))) - // ensure a grants fetch request to 'keycloak' has been performed by authorizer's grants refresh job - .waitingFor("kafka", Wait.forLogMessage(".*after: \\{\\}.*", 1) - .withStartupTimeout(Duration.ofSeconds(210))); + // Moved into test code: waitForACLs() @Rule public TestRule logCollector = new TestContainersLogCollector(environment); @@ -52,6 +52,7 @@ public class KeycloakAuthorizationTests { private static final String INTROSPECTPLAIN_LISTENER = "kafka:9095"; private static final String JWTREFRESH_LISTENER = "kafka:9096"; + @Test public void doTest() throws Exception { try { @@ -60,47 +61,50 @@ public void doTest() throws Exception { logStart("KeycloakAuthorizationTest :: ConfigurationTest"); new ConfigurationTest(kafkaContainer).doTest(); - logStart("KeycloakAuthorizationTest :: MetricsTest"); + logStart("KeycloakAuthorizationTest :: MetricsTest (part 1)"); MetricsTest.doTest(); + // Ensure ACLs have been added to Kafka cluster + waitForACLs(); + // This test assumes that it is the first producing and consuming test - logStart("KeycloakAuthorizationTest :: MultiSaslTests"); + logStart("KeycloakAuthorizationTest :: MultiSaslTest"); new MultiSaslTest(kafkaContainer).doTest(); + logStart("KeycloakAuthorizationTest :: ScramTest"); + new ScramTest().doTest(); + logStart("KeycloakAuthorizationTest :: JwtValidationAuthzTest"); - new BasicTest(JWT_LISTENER, false).doTest(); + new BasicTest(kafkaContainer, JWT_LISTENER, false).doTest(); logStart("KeycloakAuthorizationTest :: IntrospectionValidationAuthzTest"); - new BasicTest(INTROSPECT_LISTENER, false).doTest(); + new BasicTest(kafkaContainer, INTROSPECT_LISTENER, false).doTest(); + + logStart("KeycloakAuthorizationTest :: MetricsTest (part 2)"); + MetricsTest.doTestValidationAndAuthorization(); logStart("KeycloakAuthorizationTest :: OAuthOverPlain + JwtValidationAuthzTest"); - new OAuthOverPlainTest(JWTPLAIN_LISTENER, true).doTest(); + new OAuthOverPlainTest(kafkaContainer, JWTPLAIN_LISTENER, true).doTest(); logStart("KeycloakAuthorizationTest :: OAuthOverPlain + IntrospectionValidationAuthzTest"); - new OAuthOverPlainTest(INTROSPECTPLAIN_LISTENER, true).doTest(); + new OAuthOverPlainTest(kafkaContainer, INTROSPECTPLAIN_LISTENER, true).doTest(); logStart("KeycloakAuthorizationTest :: OAuthOverPlain + FloodTest"); - new FloodTest(kafkaContainer, JWTPLAIN_LISTENER, true).doTest(); + new FloodTest(JWTPLAIN_LISTENER, true).doTest(); logStart("KeycloakAuthorizationTest :: JWT FloodTest"); - new FloodTest(kafkaContainer, JWT_LISTENER, false).doTest(); + new FloodTest(JWT_LISTENER, false).doTest(); logStart("KeycloakAuthorizationTest :: Introspection FloodTest"); - new FloodTest(kafkaContainer, INTROSPECT_LISTENER, false).doTest(); + new FloodTest(INTROSPECT_LISTENER, false).doTest(); // This test has to be the last one - it changes the team-a-client, and team-b-client permissions in
Keycloak logStart("KeycloakAuthorizationTest :: JwtValidationAuthzTest + RefreshGrants"); - new RefreshTest(JWTREFRESH_LISTENER, false).doTest(); + new RefreshTest(kafkaContainer, JWTREFRESH_LISTENER, false).doTest(); } catch (Throwable e) { log.error("Keycloak Authorization Test failed: ", e); throw e; } } - - private void logStart(String msg) { - System.out.println(); - System.out.println("======== " + msg); - System.out.println(); - } } diff --git a/testsuite/keycloak-authz-zk-tests/docker-compose.yml b/testsuite/keycloak-authz-zk-tests/docker-compose.yml new file mode 100644 index 00000000..72918760 --- /dev/null +++ b/testsuite/keycloak-authz-zk-tests/docker-compose.yml @@ -0,0 +1,158 @@ +version: '3' + +services: + keycloak: + image: quay.io/keycloak/keycloak:19.0.3-legacy + ports: + - "8080:8080" + - "8443:8443" + volumes: + - ${PWD}/../docker/keycloak/realms:/opt/jboss/keycloak/realms + + entrypoint: "" + + command: + - /bin/bash + - -c + - cd /opt/jboss && /opt/jboss/tools/docker-entrypoint.sh -Dkeycloak.profile.feature.upload_scripts=enabled -b 0.0.0.0 + + environment: + - KEYCLOAK_USER=admin + - KEYCLOAK_PASSWORD=admin + - KEYCLOAK_HTTPS_PORT=8443 + - PROXY_ADDRESS_FORWARDING=true + - KEYCLOAK_IMPORT=/opt/jboss/keycloak/realms/kafka-authz-realm.json + + kafka: + image: ${KAFKA_DOCKER_IMAGE} + ports: + - "9092:9092" + - "9093:9093" + - "9094:9094" + - "9095:9095" + - "9096:9096" + - "9100:9100" + - "9101:9101" + + # Prometheus JMX Exporter + - "9404:9404" + + # javaagent debug port + - "5006:5006" + volumes: + - ${PWD}/../docker/target/kafka/libs:/opt/kafka/libs/strimzi + - ${PWD}/../docker/kafka/config:/opt/kafka/config/strimzi + - ${PWD}/../docker/kafka/scripts:/opt/kafka/strimzi + command: + - /bin/bash + - -c + - cd /opt/kafka/strimzi && ./start.sh + environment: + + #- KAFKA_DEBUG=y + #- DEBUG_SUSPEND_FLAG=y + #- JAVA_DEBUG_PORT=*:5006 + + - KAFKA_BROKER_ID=1 + - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + - KAFKA_LISTENERS=JWT://kafka:9092,INTROSPECT://kafka:9093,JWTPLAIN://kafka:9094,INTROSPECTPLAIN://kafka:9095,JWTREFRESH://kafka:9096,PLAIN://kafka:9100,SCRAM://kafka:9101 + - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=JWT:SASL_PLAINTEXT,INTROSPECT:SASL_PLAINTEXT,JWTPLAIN:SASL_PLAINTEXT,INTROSPECTPLAIN:SASL_PLAINTEXT,JWTREFRESH:SASL_PLAINTEXT,PLAIN:SASL_PLAINTEXT,SCRAM:SASL_PLAINTEXT + - KAFKA_SASL_ENABLED_MECHANISMS=OAUTHBEARER + - KAFKA_INTER_BROKER_LISTENER_NAME=JWT + - KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL=OAUTHBEARER + + - KAFKA_PRINCIPAL_BUILDER_CLASS=io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder + + # Common settings for all the listeners + # username extraction from JWT token claim + - OAUTH_USERNAME_CLAIM=preferred_username + - OAUTH_CONNECT_TIMEOUT_SECONDS=20 + + - OAUTH_ENABLE_METRICS=true + + # Configuration of individual listeners + - KAFKA_LISTENER_NAME_JWT_OAUTHBEARER_SASL_JAAS_CONFIG=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required oauth.jwks.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/certs\" oauth.valid.issuer.uri=\"http://keycloak:8080/auth/realms/kafka-authz\" oauth.token.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token\" oauth.client.id=\"kafka\" oauth.client.secret=\"kafka-secret\" oauth.groups.claim=\"$$.realm_access.roles\" ; + - KAFKA_LISTENER_NAME_JWT_OAUTHBEARER_SASL_SERVER_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler + - 
KAFKA_LISTENER_NAME_JWT_OAUTHBEARER_SASL_LOGIN_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler + + - KAFKA_LISTENER_NAME_INTROSPECT_OAUTHBEARER_SASL_JAAS_CONFIG=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required oauth.introspection.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token/introspect\" oauth.valid.issuer.uri=\"http://keycloak:8080/auth/realms/kafka-authz\" oauth.client.id=\"kafka\" oauth.client.secret=\"kafka-secret\" unsecuredLoginStringClaim_sub=\"admin\" ; + - KAFKA_LISTENER_NAME_INTROSPECT_OAUTHBEARER_SASL_SERVER_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler + + - KAFKA_LISTENER_NAME_JWTPLAIN_SASL_ENABLED_MECHANISMS=PLAIN + - KAFKA_LISTENER_NAME_JWTPLAIN_PLAIN_SASL_JAAS_CONFIG=org.apache.kafka.common.security.plain.PlainLoginModule required oauth.jwks.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/certs\" oauth.valid.issuer.uri=\"http://keycloak:8080/auth/realms/kafka-authz\" oauth.token.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token\" oauth.client.id=\"kafka\" oauth.client.secret=\"kafka-secret\" unsecuredLoginStringClaim_sub=\"admin\" ; + - KAFKA_LISTENER_NAME_JWTPLAIN_PLAIN_SASL_SERVER_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.server.plain.JaasServerOauthOverPlainValidatorCallbackHandler + + - KAFKA_LISTENER_NAME_INTROSPECTPLAIN_SASL_ENABLED_MECHANISMS=PLAIN + - KAFKA_LISTENER_NAME_INTROSPECTPLAIN_PLAIN_SASL_JAAS_CONFIG=org.apache.kafka.common.security.plain.PlainLoginModule required oauth.introspection.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token/introspect\" oauth.valid.issuer.uri=\"http://keycloak:8080/auth/realms/kafka-authz\" oauth.token.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token\" oauth.client.id=\"kafka\" oauth.client.secret=\"kafka-secret\" unsecuredLoginStringClaim_sub=\"admin\" ; + - KAFKA_LISTENER_NAME_INTROSPECTPLAIN_PLAIN_SASL_SERVER_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.server.plain.JaasServerOauthOverPlainValidatorCallbackHandler + + - KAFKA_LISTENER_NAME_JWTREFRESH_OAUTHBEARER_SASL_JAAS_CONFIG=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required oauth.jwks.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/certs\" oauth.valid.issuer.uri=\"http://keycloak:8080/auth/realms/kafka-authz\" oauth.token.endpoint.uri=\"http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token\" oauth.client.id=\"kafka\" oauth.client.secret=\"kafka-secret\" oauth.jwks.refresh.min.pause.seconds=\"2\" unsecuredLoginStringClaim_sub=\"admin\" ; + - KAFKA_LISTENER_NAME_JWTREFRESH_OAUTHBEARER_SASL_SERVER_CALLBACK_HANDLER_CLASS=io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler + # Enable re-authentication + - KAFKA_LISTENER_NAME_JWTREFRESH_OAUTHBEARER_CONNECTIONS_MAX_REAUTH_MS=3600000 + + - KAFKA_LISTENER_NAME_PLAIN_SASL_ENABLED_MECHANISMS=PLAIN + - KAFKA_LISTENER_NAME_PLAIN_PLAIN_SASL_JAAS_CONFIG=org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin-password\" user_admin=\"admin-password\" user_bobby=\"bobby-secret\" ; + + - KAFKA_LISTENER_NAME_SCRAM_SASL_ENABLED_MECHANISMS=SCRAM-SHA-512 + - KAFKA_LISTENER_NAME_SCRAM_SCRAM__2DSHA__2D512_SASL_JAAS_CONFIG=org.apache.kafka.common.security.scram.ScramLoginModule required 
username=\"admin\" password=\"admin-secret\" ; + + + + # Authorizer configuration + - KAFKA_AUTHORIZER_CLASS_NAME=io.strimzi.kafka.oauth.server.authorizer.KeycloakAuthorizer + + - KAFKA_STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI=http://keycloak:8080/auth/realms/kafka-authz/protocol/openid-connect/token + - KAFKA_STRIMZI_AUTHORIZATION_CLIENT_ID=kafka + - KAFKA_STRIMZI_AUTHORIZATION_CLIENT_SECRET=kafka-secret + - KAFKA_STRIMZI_AUTHORIZATION_KAFKA_CLUSTER_NAME=my-cluster + - KAFKA_STRIMZI_AUTHORIZATION_DELEGATE_TO_KAFKA_ACL=true + - KAFKA_STRIMZI_AUTHORIZATION_READ_TIMEOUT_SECONDS=45 + + # Parameters controlling the refreshing of grants + - KAFKA_STRIMZI_AUTHORIZATION_GRANTS_REFRESH_POOL_SIZE=4 + + # Any change to permissions will be reflected within 10 seconds + # Has to be set to 10 seconds for keycloak-authz*-tests/**/RefreshTest + - KAFKA_STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS=10 + + # If a grants fetch fails, immediately perform one retry + - KAFKA_STRIMZI_AUTHORIZATION_HTTP_RETRIES=1 + + # Use grants fetched for another session if available + - KAFKA_STRIMZI_AUTHORIZATION_REUSE_GRANTS=true + + - KAFKA_STRIMZI_AUTHORIZATION_ENABLE_METRICS=true + + - KAFKA_SUPER_USERS=User:admin;User:service-account-kafka + + # Other configuration + - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 + + # For start.sh script to know where the keycloak is listening + - KEYCLOAK_HOST=${KEYCLOAK_HOST:-keycloak} + - REALM=${REALM:-kafka-authz} + + kafka-acls: + image: ${KAFKA_DOCKER_IMAGE} + links: + - kafka + volumes: + - ${PWD}/../docker/kafka-acls/scripts:/opt/kafka/strimzi + command: + - /bin/bash + - -c + - cd /opt/kafka/strimzi && ./add-acls.sh + + zookeeper: + image: ${KAFKA_DOCKER_IMAGE} + ports: + - "2181:2181" + volumes: + - ${PWD}/../docker/zookeeper/scripts:/opt/kafka/strimzi + command: + - /bin/bash + - -c + - cd /opt/kafka/strimzi && ./start.sh + environment: + - LOG_DIR=/tmp/logs diff --git a/testsuite/keycloak-authz-zk-tests/pom.xml b/testsuite/keycloak-authz-zk-tests/pom.xml new file mode 100644 index 00000000..4acdb948 --- /dev/null +++ b/testsuite/keycloak-authz-zk-tests/pom.xml @@ -0,0 +1,63 @@ + + + + 4.0.0 + + + io.strimzi.oauth.testsuite + kafka-oauth-testsuite + 1.0.0-SNAPSHOT + + + keycloak-authz-zk-tests + + + + Apache License, Version 2.0 + https://www.apache.org/licenses/LICENSE-2.0.txt + + + + + ../.. + + + + + org.testcontainers + testcontainers + test + + + junit + junit + ${version.junit} + + + io.strimzi.oauth.testsuite + common + + + io.strimzi.oauth.testsuite + keycloak-authz-tests + + + + io.strimzi + kafka-oauth-common + + + io.strimzi + kafka-oauth-client + + + org.apache.kafka + kafka-clients + + + org.slf4j + slf4j-simple + + + \ No newline at end of file diff --git a/testsuite/keycloak-authz-zk-tests/src/test/java/io/strimzi/testsuite/oauth/authz/kraft/KeycloakZKAuthorizationTests.java b/testsuite/keycloak-authz-zk-tests/src/test/java/io/strimzi/testsuite/oauth/authz/kraft/KeycloakZKAuthorizationTests.java new file mode 100644 index 00000000..4ea0534f --- /dev/null +++ b/testsuite/keycloak-authz-zk-tests/src/test/java/io/strimzi/testsuite/oauth/authz/kraft/KeycloakZKAuthorizationTests.java @@ -0,0 +1,122 @@ +/* + * Copyright 2017-2020, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.testsuite.oauth.authz.kraft; + +import io.strimzi.testsuite.oauth.authz.BasicTest; +import io.strimzi.testsuite.oauth.authz.ConfigurationTest; +import io.strimzi.testsuite.oauth.authz.FloodTest; +import io.strimzi.testsuite.oauth.authz.MetricsTest; +import io.strimzi.testsuite.oauth.authz.MultiSaslTest; +import io.strimzi.testsuite.oauth.authz.OAuthOverPlainTest; +import io.strimzi.testsuite.oauth.authz.RefreshTest; +import io.strimzi.testsuite.oauth.authz.ScramTest; +import io.strimzi.testsuite.oauth.authz.SingletonTest; +import io.strimzi.testsuite.oauth.common.TestContainersLogCollector; +import io.strimzi.testsuite.oauth.common.TestContainersWatcher; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestRule; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.wait.strategy.Wait; + +import java.io.File; +import java.time.Duration; + +import static io.strimzi.testsuite.oauth.authz.Common.waitForACLs; +import static io.strimzi.testsuite.oauth.common.TestUtil.logStart; + +/** + * Tests for OAuth authentication using Keycloak + Keycloak Authorization Services based authorization when KeycloakAuthorizer is configured on the Kafka broker running in Zookeeper mode. + *

<p> + * This test assumes there are multiple listeners configured with OAUTHBEARER or PLAIN support, but each configured differently + * - configured with different options, or different realm. + * <p>
+ * There is KeycloakAuthorizer configured on the Kafka broker. + */ +public class KeycloakZKAuthorizationTests { + + @ClassRule + public static TestContainersWatcher environment = + new TestContainersWatcher(new File("docker-compose.yml")) + .withServices("keycloak", "zookeeper", "kafka", "kafka-acls") + // ensure kafka has started + .waitingFor("kafka", Wait.forLogMessage(".*started \\(kafka.server.KafkaServer\\).*", 1) + .withStartupTimeout(Duration.ofSeconds(120))); + + // ensure ACLs for user 'alice' have been added + // Moved into test code: waitForACLs() + + @Rule + public TestRule logCollector = new TestContainersLogCollector(environment); + + private static final Logger log = LoggerFactory.getLogger(KeycloakZKAuthorizationTests.class); + + private static final String JWT_LISTENER = "kafka:9092"; + private static final String INTROSPECT_LISTENER = "kafka:9093"; + private static final String JWTPLAIN_LISTENER = "kafka:9094"; + private static final String INTROSPECTPLAIN_LISTENER = "kafka:9095"; + private static final String JWTREFRESH_LISTENER = "kafka:9096"; + + + @Test + public void doTest() throws Exception { + try { + + String kafkaContainer = environment.getContainerByServiceName("kafka_1").get().getContainerInfo().getName().substring(1); + + logStart("KeycloakZKAuthorizationTest :: ConfigurationTest"); + new ConfigurationTest(kafkaContainer).doTest(); + + logStart("KeycloakZKAuthorizationTest :: MetricsTest (part 1)"); + MetricsTest.doTest(); + + // Before running the rest of the tests, ensure ACLs have been added to Kafka cluster + waitForACLs(); + + logStart("KeycloakZKAuthorizationTest :: MultiSaslTest"); + new MultiSaslTest(kafkaContainer).doTest(); + + logStart("KeycloakZKAuthorizationTest :: ScramTest"); + new ScramTest().doTest(); + + logStart("KeycloakZKAuthorizationTest :: JwtValidationAuthzTest"); + new BasicTest(kafkaContainer, JWT_LISTENER, false).doTest(); + + logStart("KeycloakZKAuthorizationTest :: IntrospectionValidationAuthzTest"); + new BasicTest(kafkaContainer, INTROSPECT_LISTENER, false).doTest(); + + logStart("KeycloakZKAuthorizationTest :: MetricsTest (part 2)"); + MetricsTest.doTestValidationAndAuthorization(); + + logStart("KeycloakZKAuthorizationTest :: OAuthOverPlain + JwtValidationAuthzTest"); + new OAuthOverPlainTest(kafkaContainer, JWTPLAIN_LISTENER, true).doTest(); + + logStart("KeycloakZKAuthorizationTest :: OAuthOverPlain + IntrospectionValidationAuthzTest"); + new OAuthOverPlainTest(kafkaContainer, INTROSPECTPLAIN_LISTENER, true).doTest(); + + logStart("KeycloakZKAuthorizationTest :: OAuthOverPlain + FloodTest"); + new FloodTest(JWTPLAIN_LISTENER, true).doTest(); + + logStart("KeycloakZKAuthorizationTest :: JWT FloodTest"); + new FloodTest(JWT_LISTENER, false).doTest(); + + logStart("KeycloakZKAuthorizationTest :: Introspection FloodTest"); + new FloodTest(INTROSPECT_LISTENER, false).doTest(); + + // This test has to be the last one - it changes the team-a-client, and team-b-client permissions in Keycloak + logStart("KeycloakZKAuthorizationTest :: JwtValidationAuthzTest + RefreshGrants"); + new RefreshTest(kafkaContainer, JWTREFRESH_LISTENER, false).doTest(); + + logStart("KeycloakZKAuthorizationTest :: SingletonTest"); + new SingletonTest(kafkaContainer).doSingletonTest(1); + + } catch (Throwable e) { + log.error("Keycloak ZK Authorization Test failed: ", e); + throw e; + } + } +} diff --git a/testsuite/keycloak-authz-zk-tests/src/test/resources/simplelogger.properties
b/testsuite/keycloak-authz-zk-tests/src/test/resources/simplelogger.properties new file mode 100644 index 00000000..53f71fbd --- /dev/null +++ b/testsuite/keycloak-authz-zk-tests/src/test/resources/simplelogger.properties @@ -0,0 +1,2 @@ +org.slf4j.simpleLogger.log.org.apache.kafka=OFF +org.slf4j.simpleLogger.log.io.strimzi=INFO \ No newline at end of file diff --git a/testsuite/keycloak-errors-tests/docker-compose.yml b/testsuite/keycloak-errors-tests/docker-compose.yml index 4639aa0a..c1d518b2 100644 --- a/testsuite/keycloak-errors-tests/docker-compose.yml +++ b/testsuite/keycloak-errors-tests/docker-compose.yml @@ -34,7 +34,7 @@ services: - "9206:9206" - "9207:9207" - "9208:9208" - #- "5006:5006" + - "5006:5006" volumes: - ${PWD}/../docker/target/kafka/libs:/opt/kafka/libs/strimzi - ${PWD}/../docker/kafka/config:/opt/kafka/config/strimzi diff --git a/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/AdminServerRequestHandler.java b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/AdminServerRequestHandler.java index 75c01529..fb5114e1 100644 --- a/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/AdminServerRequestHandler.java +++ b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/AdminServerRequestHandler.java @@ -65,6 +65,9 @@ public void handle(HttpServerRequest req) { } else if (endpoint == Endpoint.REVOCATIONS) { processRevocation(req, path); return; + } else if (endpoint == Endpoint.GRANTS_MAP) { + processGrants(req, path); + return; } if (req.method() == GET) { @@ -181,7 +184,52 @@ private void processUsersRequest(HttpServerRequest req, String[] path) { return; } - verticle.createOrUpdateUser(username, password); + Long expiresIn = json.getLong("expires_in"); + + verticle.createOrUpdateUser(username, new UserInfo(password, expiresIn)); + sendResponse(req, OK); + + } catch (Exception e) { + handleFailure(req, e, log); + } + }); + return; + } + + sendResponse(req, METHOD_NOT_ALLOWED); + } + + private void processGrants(HttpServerRequest req, String[] path) { + if (path.length > 3) { + sendResponse(req, NOT_FOUND); + return; + } + + if (req.method() == GET) { + sendResponse(req, OK, getClientsAsJsonString()); + return; + + } else if (isOneOf(req.method(), POST, PUT)) { + + req.bodyHandler(buffer -> { + try { + log.info(buffer.toString()); + + JsonObject json = buffer.toJsonObject(); + + String token = json.getString("token"); + if (token == null) { + sendResponse(req, BAD_REQUEST, "Required attribute 'token' is null or missing."); + return; + } + + JsonArray grants = json.getJsonArray("grants"); + if (grants == null) { + sendResponse(req, BAD_REQUEST, "Required attribute 'grants' is missing."); + return; + } + + verticle.createOrUpdateGrants(token, grants); sendResponse(req, OK); } catch (Exception e) { @@ -250,9 +298,11 @@ private String getClientsAsJsonString() { private String getUsersAsJsonString() { JsonArray result = new JsonArray(); - for (Map.Entry ent: verticle.getUsers().entrySet()) { + for (Map.Entry ent: verticle.getUsers().entrySet()) { JsonObject json = new JsonObject(); - json.put(ent.getKey(), ent.getValue()); + json.put("username", ent.getKey()); + json.put("password", ent.getValue().password); + json.put("expires_in", ent.getValue().expiresIn); result.add(json); } return result.toString(); diff --git a/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/AuthServerRequestHandler.java 
b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/AuthServerRequestHandler.java index d74da7d5..7c53164a 100644 --- a/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/AuthServerRequestHandler.java +++ b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/AuthServerRequestHandler.java @@ -54,6 +54,7 @@ import static io.strimzi.testsuite.oauth.server.Endpoint.TOKEN; import static io.strimzi.testsuite.oauth.server.Endpoint.USERINFO; import static io.strimzi.testsuite.oauth.server.Mode.MODE_200; +import static io.strimzi.testsuite.oauth.server.Mode.MODE_200_DELAYED; import static io.strimzi.testsuite.oauth.server.Mode.MODE_FAILING_500; import static io.strimzi.testsuite.oauth.server.Mode.MODE_JWKS_RSA_WITHOUT_SIG_USE; import static io.strimzi.testsuite.oauth.server.Mode.MODE_JWKS_RSA_WITH_SIG_USE; @@ -95,45 +96,50 @@ public void handle(HttpServerRequest req) { Mode mode = verticle.getMode(endpoint); try { - if (endpoint == Endpoint.JWKS && - isOneOf(mode, MODE_200, MODE_JWKS_RSA_WITH_SIG_USE, MODE_JWKS_RSA_WITHOUT_SIG_USE)) { - processJwksRequest(req, mode); - return; - } else if (endpoint == TOKEN && mode == MODE_200) { - processTokenRequest(req); - return; - } else if (endpoint == FAILING_TOKEN) { - processFailingRequest(req, endpoint, mode, this::processTokenRequest); - return; - } else if (endpoint == INTROSPECT && mode == MODE_200) { - processIntrospectRequest(req); - return; - } else if (endpoint == FAILING_INTROSPECT) { - processFailingRequest(req, endpoint, mode, this::processIntrospectRequest); - return; - } else if (endpoint == USERINFO && mode == MODE_200) { - processUserInfoRequest(req); - return; - } else if (endpoint == FAILING_USERINFO) { - processFailingRequest(req, endpoint, mode, this::processUserInfoRequest); - return; - } else if (endpoint == GRANTS && mode == MODE_200) { - processGrantsRequest(req); - return; - } else if (endpoint == FAILING_GRANTS) { - processFailingRequest(req, endpoint, mode, this::processGrantsRequest); - return; - } - - if (!generateResponse(req, mode)) { - sendResponse(req, OK, "" + verticle.getMode(endpoint)); + if (!processRequest(endpoint, mode, req)) { + if (!generateResponse(req, mode)) { + sendResponse(req, OK, "" + verticle.getMode(endpoint)); + } } - } catch (Throwable t) { handleFailure(req, t, log); } } + private boolean processRequest(Endpoint endpoint, Mode mode, HttpServerRequest req) throws NoSuchAlgorithmException, JOSEException, InterruptedException { + if (endpoint == Endpoint.JWKS && + isOneOf(mode, MODE_200, MODE_JWKS_RSA_WITH_SIG_USE, MODE_JWKS_RSA_WITHOUT_SIG_USE)) { + processJwksRequest(req, mode); + } else if (endpoint == TOKEN && mode == MODE_200) { + processTokenRequest(req); + } else if (endpoint == FAILING_TOKEN) { + processFailingRequest(req, endpoint, mode, this::processTokenRequest); + } else if (endpoint == INTROSPECT && mode == MODE_200) { + processIntrospectRequest(req); + } else if (endpoint == FAILING_INTROSPECT) { + processFailingRequest(req, endpoint, mode, this::processIntrospectRequest); + } else if (endpoint == USERINFO && mode == MODE_200) { + processUserInfoRequest(req); + } else if (endpoint == FAILING_USERINFO) { + processFailingRequest(req, endpoint, mode, this::processUserInfoRequest); + } else if (endpoint == GRANTS && (mode == MODE_200 || mode == MODE_200_DELAYED)) { + if (mode == MODE_200_DELAYED) { + //verticle.getVertx().setTimer(1000, v -> processGrantsRequest(req)); + Thread.sleep(2000); + + processGrantsRequest(req); + 
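+ // Note: the Thread.sleep(2000) above deliberately simulates a slow grants endpoint. It blocks
+ // the Vert.x event loop, which appears tolerable only because this is a test-only mock server;
+ // the commented-out setTimer() call above sketches the non-blocking alternative.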
} else { + processGrantsRequest(req); + } + } else if (endpoint == FAILING_GRANTS) { + processFailingRequest(req, endpoint, mode, this::processGrantsRequest); + } else { + return false; + } + + return true; + } + private static boolean generateResponse(HttpServerRequest req, Mode mode) { boolean result = true; switch (mode) { @@ -213,14 +219,23 @@ private void processGrantsRequest(HttpServerRequest req) { try { // Create JSON response - JsonArray result = new JsonArray( - "[{\"scopes\":[\"Delete\",\"Write\",\"Describe\",\"Read\",\"Alter\",\"Create\",\"DescribeConfigs\",\"AlterConfigs\"],\"rsid\":\"ca6f195f-dbdc-48b7-a953-8e441d17f7fa\",\"rsname\":\"Topic:*\"}," + - "{\"scopes\":[\"IdempotentWrite\"],\"rsid\":\"73af36e6-5796-43e7-8129-b57fe0bac7a1\",\"rsname\":\"Cluster:*\"}," + - "{\"scopes\":[\"Describe\",\"Read\"],\"rsid\":\"141c56e8-1a85-40f3-b38a-f490bad76913\",\"rsname\":\"Group:*\"}]"); - - String jsonString = result.encode(); - sendResponse(req, OK, jsonString); - + JsonArray result = verticle.getGrants().get(token); + if (result == null) { + String jsonString = new JsonArray( + "[{\"scopes\":[\"Delete\",\"Write\",\"Describe\",\"Read\",\"Alter\",\"Create\",\"DescribeConfigs\",\"AlterConfigs\"],\"rsid\":\"ca6f195f-dbdc-48b7-a953-8e441d17f7fa\",\"rsname\":\"Topic:*\"}," + + "{\"scopes\":[\"IdempotentWrite\"],\"rsid\":\"73af36e6-5796-43e7-8129-b57fe0bac7a1\",\"rsname\":\"Cluster:*\"}," + + "{\"scopes\":[\"Describe\",\"Read\"],\"rsid\":\"141c56e8-1a85-40f3-b38a-f490bad76913\",\"rsname\":\"Group:*\"}]") + .encode(); + sendResponse(req, OK, jsonString); + + } else { + String jsonString = result.encode(); + if ("[]".equals(jsonString)) { + sendResponse(req, UNAUTHORIZED); + } else { + sendResponse(req, OK, jsonString); + } + } } catch (Throwable t) { handleFailure(req, t, log); } @@ -270,11 +285,14 @@ private void processTokenRequest(HttpServerRequest req) { try { // Create a signed JWT token - String accessToken = createSignedAccessToken(clientId, username); + UserInfo userInfo = username != null ? verticle.getUsers().get(username) : null; + long expiresIn = userInfo != null && userInfo.expiresIn != null ? userInfo.expiresIn : EXPIRES_IN_SECONDS; + + String accessToken = createSignedAccessToken(clientId, username, expiresIn); JsonObject result = new JsonObject(); result.put("access_token", accessToken); - result.put("expires_in", EXPIRES_IN_SECONDS); + result.put("expires_in", expiresIn); result.put("scope", "all"); String jsonString = result.encode(); @@ -442,7 +460,7 @@ private boolean isExpired(int expiryTimeSeconds) { return System.currentTimeMillis() > expiryTimeSeconds * 1000L; } - private String createSignedAccessToken(String clientId, String username) throws JOSEException, NoSuchAlgorithmException { + private String createSignedAccessToken(String clientId, String username, long expiresIn) throws JOSEException, NoSuchAlgorithmException { // Create RSA-signer with the private key JWSSigner signer = new RSASSASigner(verticle.getSigKey()); @@ -451,7 +469,7 @@ private String createSignedAccessToken(String clientId, String username) throws JWTClaimsSet.Builder builder = new JWTClaimsSet.Builder() .subject(username != null ? 
username : clientId) .issuer("https://mockoauth:8090") - .expirationTime(new Date(System.currentTimeMillis() + EXPIRES_IN_SECONDS * 1000)); + .expirationTime(new Date(System.currentTimeMillis() + expiresIn * 1000)); if (clientId != null) { builder.claim("clientId", clientId); @@ -494,11 +512,11 @@ private String authorizeClient(String authorization) { } private String authorizeUser(String username, String password) { - String pass = verticle.getUsers().get(username); - if (pass == null) { + UserInfo userInfo = verticle.getUsers().get(username); + if (userInfo == null || userInfo.password == null) { return null; } - return pass.equals(password) ? username : null; + return userInfo.password.equals(password) ? username : null; } private void processJwksRequest(HttpServerRequest req, Mode mode) throws NoSuchAlgorithmException, JOSEException { diff --git a/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/Endpoint.java b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/Endpoint.java index 7889f38c..b2534af6 100644 --- a/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/Endpoint.java +++ b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/Endpoint.java @@ -19,6 +19,7 @@ public enum Endpoint { USERS, REVOCATIONS, GRANTS, + GRANTS_MAP, FAILING_GRANTS; public static Endpoint fromString(String value) { diff --git a/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/MockOAuthServerMainVerticle.java b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/MockOAuthServerMainVerticle.java index d119e1d0..edd45162 100644 --- a/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/MockOAuthServerMainVerticle.java +++ b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/MockOAuthServerMainVerticle.java @@ -12,6 +12,7 @@ import io.vertx.core.Vertx; import io.vertx.core.http.HttpServer; import io.vertx.core.http.HttpServerOptions; +import io.vertx.core.json.JsonArray; import io.vertx.core.net.JksOptions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -139,11 +140,14 @@ public class MockOAuthServerMainVerticle extends AbstractVerticle { private String keystoreExpiredPass; private final Map clients = new HashMap<>(); - private final Map users = new HashMap<>(); + private final Map users = new HashMap<>(); private final Set revokedTokens = new HashSet<>(); + + private final Map grants = new HashMap<>(); + private RSAKey sigKey; - private Map coins = new ConcurrentHashMap<>(); + private final Map coins = new ConcurrentHashMap<>(); public void start() { @@ -290,11 +294,11 @@ Map getClients() { return Collections.unmodifiableMap(clients); } - void createOrUpdateUser(String username, String password) { - users.put(username, password); + void createOrUpdateUser(String username, UserInfo userInfo) { + users.put(username, userInfo); } - Map getUsers() { + Map getUsers() { return Collections.unmodifiableMap(users); } @@ -306,6 +310,14 @@ Set getRevokedTokens() { return Collections.unmodifiableSet(revokedTokens); } + void createOrUpdateGrants(String accessToken, JsonArray value) { + grants.put(accessToken, value); + } + + Map getGrants() { + return Collections.unmodifiableMap(grants); + } + private static String getEnvVar(String name, String defaultValue) { String val = System.getenv(name); return val != null ? 
val : defaultValue; diff --git a/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/Mode.java b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/Mode.java index 7685269a..0fcedc1f 100644 --- a/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/Mode.java +++ b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/Mode.java @@ -17,6 +17,7 @@ enum Mode { MODE_FAILING_500, MODE_OFF, MODE_STALL, + MODE_200_DELAYED, MODE_CERT_ONE_ON, MODE_CERT_TWO_ON, MODE_EXPIRED_CERT_ON, diff --git a/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/UserInfo.java b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/UserInfo.java new file mode 100644 index 00000000..792ff41e --- /dev/null +++ b/testsuite/mock-oauth-server/src/main/java/io/strimzi/testsuite/oauth/server/UserInfo.java @@ -0,0 +1,15 @@ +/* + * Copyright 2017-2023, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.testsuite.oauth.server; + +class UserInfo { + final String password; + final Long expiresIn; + + public UserInfo(String password, Long expiresIn) { + this.password = password; + this.expiresIn = expiresIn; + } +} diff --git a/testsuite/mockoauth-tests/src/test/java/io/strimzi/kafka/oauth/server/TestTokenFactory.java b/testsuite/mockoauth-tests/src/test/java/io/strimzi/kafka/oauth/server/TestTokenFactory.java new file mode 100644 index 00000000..89683811 --- /dev/null +++ b/testsuite/mockoauth-tests/src/test/java/io/strimzi/kafka/oauth/server/TestTokenFactory.java @@ -0,0 +1,14 @@ +/* + * Copyright 2017-2023, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.kafka.oauth.server; + +import io.strimzi.kafka.oauth.common.TokenInfo; + +public class TestTokenFactory { + + public static BearerTokenWithJsonPayload newTokenForUser(TokenInfo tokenInfo) { + return new BearerTokenWithJsonPayload(tokenInfo); + } +} diff --git a/testsuite/mockoauth-tests/src/test/java/io/strimzi/kafka/oauth/server/authorizer/TestAuthzUtil.java b/testsuite/mockoauth-tests/src/test/java/io/strimzi/kafka/oauth/server/authorizer/TestAuthzUtil.java new file mode 100644 index 00000000..119842e9 --- /dev/null +++ b/testsuite/mockoauth-tests/src/test/java/io/strimzi/kafka/oauth/server/authorizer/TestAuthzUtil.java @@ -0,0 +1,12 @@ +/* + * Copyright 2017-2023, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.kafka.oauth.server.authorizer; + +public class TestAuthzUtil { + + public static void clearKeycloakAuthorizerService() { + KeycloakAuthorizerService.clearInstance(); + } +} diff --git a/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/MockOAuthTests.java b/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/MockOAuthTests.java index 6ef39915..a1b0ae0c 100644 --- a/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/MockOAuthTests.java +++ b/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/MockOAuthTests.java @@ -14,6 +14,7 @@ import io.strimzi.testsuite.oauth.mockoauth.PasswordAuthTest; import io.strimzi.testsuite.oauth.mockoauth.RetriesTests; +import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -23,8 +24,14 @@ import org.testcontainers.containers.wait.strategy.Wait; import java.io.File; +import java.io.IOException; import java.time.Duration; +/** + * Some tests rely on resources/simplelogger.properties to be configured to log to the file target/test.log. + *
<p>
      + * Log output is analyzed in the test to make sure the behaviour is as expected. + */ public class MockOAuthTests { @ClassRule @@ -41,10 +48,16 @@ public class MockOAuthTests { private static final Logger log = LoggerFactory.getLogger(MockOAuthTests.class); + @BeforeClass + public static void staticInit() throws IOException { + KeycloakAuthorizerTest.staticInit(); + } + @Test public void runTests() throws Exception { try { String kafkaContainer = environment.getContainerByServiceName("kafka_1").get().getContainerInfo().getName().substring(1); + System.out.println("See log at: " + new File("target/test.log").getAbsolutePath()); logStart("MetricsTest :: Basic Metrics Tests"); new MetricsTest().doTest(); @@ -64,8 +77,8 @@ public void runTests() throws Exception { logStart("RetriesTests :: Authentication HTTP Retries Tests"); new RetriesTests(kafkaContainer).doTests(); - logStart("KeycloakAuthorizerTest :: Grants HTTP Retries Tests"); - new KeycloakAuthorizerTest().doHttpRetriesTest(); + // Keycloak authorizer tests + new KeycloakAuthorizerTest().doTests(); } catch (Throwable e) { log.error("Exception has occurred: ", e); diff --git a/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/Common.java b/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/Common.java index 2c4e890d..73d35771 100644 --- a/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/Common.java +++ b/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/Common.java @@ -14,9 +14,7 @@ import io.strimzi.kafka.oauth.common.TimeUtil; import io.strimzi.kafka.oauth.common.TokenInfo; import io.strimzi.testsuite.oauth.metrics.Metrics; -import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringSerializer; import org.junit.Assert; @@ -34,8 +32,6 @@ import java.util.Properties; import java.util.Set; -import static io.strimzi.kafka.oauth.common.OAuthAuthenticator.urlencode; - public class Common { static final String WWW_FORM_CONTENT_TYPE = "application/x-www-form-urlencoded"; @@ -63,12 +59,6 @@ static void setCommonProducerProperties(String kafkaBootstrap, Properties p) { p.setProperty(ProducerConfig.RETRIES_CONFIG, "0"); } - public static Properties buildConsumerConfigOAuthBearer(String kafkaBootstrap, Map oauthConfig) { - Properties p = buildCommonConfigOAuthBearer(oauthConfig); - setCommonConsumerProperties(kafkaBootstrap, p); - return p; - } - static Properties buildCommonConfigOAuthBearer(Map oauthConfig) { String configOptions = getJaasConfigOptionsString(oauthConfig); @@ -87,27 +77,6 @@ public static Properties buildProducerConfigPlain(String kafkaBootstrap, Map scramConfig) { - Properties p = buildCommonConfigScram(scramConfig); - setCommonProducerProperties(kafkaBootstrap, p); - return p; - } - - public static Properties buildConsumerConfigPlain(String kafkaBootstrap, Map plainConfig) { - Properties p = buildCommonConfigPlain(plainConfig); - setCommonConsumerProperties(kafkaBootstrap, p); - return p; - } - - static void setCommonConsumerProperties(String kafkaBootstrap, Properties p) { - p.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrap); - p.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); - p.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); - 
p.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "consumer-group"); - p.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "10"); - p.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); - } - static Properties buildCommonConfigPlain(Map plainConfig) { String configOptions = getJaasConfigOptionsString(plainConfig); @@ -118,53 +87,6 @@ static Properties buildCommonConfigPlain(Map plainConfig) { return p; } - static Properties buildCommonConfigScram(Map scramConfig) { - String configOptions = getJaasConfigOptionsString(scramConfig); - - Properties p = new Properties(); - p.setProperty("security.protocol", "SASL_PLAINTEXT"); - p.setProperty("sasl.mechanism", "SCRAM-SHA-512"); - p.setProperty("sasl.jaas.config", "org.apache.kafka.common.security.scram.ScramLoginModule required " + configOptions + " ;"); - return p; - } - - static String loginWithUsernameForRefreshToken(URI tokenEndpointUri, String username, String password, String clientId) throws IOException { - - JsonNode result = HttpUtil.post(tokenEndpointUri, - null, - null, - null, - WWW_FORM_CONTENT_TYPE, - "grant_type=password&username=" + username + "&password=" + password + "&client_id=" + clientId, - JsonNode.class); - - JsonNode token = result.get("refresh_token"); - if (token == null) { - throw new IllegalStateException("Invalid response from authorization server: no refresh_token"); - } - return token.asText(); - } - - static String loginWithUsernamePassword(URI tokenEndpointUri, String username, String password, String clientId) throws IOException { - - String body = "grant_type=password&username=" + urlencode(username) + - "&password=" + urlencode(password) + "&client_id=" + urlencode(clientId); - - JsonNode result = HttpUtil.post(tokenEndpointUri, - null, - null, - null, - WWW_FORM_CONTENT_TYPE, - body, - JsonNode.class); - - JsonNode token = result.get("access_token"); - if (token == null) { - throw new IllegalStateException("Invalid response from authorization server: no access_token"); - } - return token.asText(); - } - static String loginWithClientSecret(String tokenEndpoint, String clientId, String secret, String truststorePath, String truststorePass) throws IOException { TokenInfo tokenInfo = OAuthAuthenticator.loginWithClientSecret( URI.create(tokenEndpoint), @@ -259,6 +181,13 @@ public static void createOAuthUser(String username, String password) throws IOEx "{\"username\": \"" + username + "\", \"password\": \"" + password + "\"}", String.class); } + public static void createOAuthUser(String username, String password, long expiresInSeconds) throws IOException { + HttpUtil.post(URI.create("http://mockoauth:8091/admin/users"), + null, + "application/json", + "{\"username\": \"" + username + "\", \"password\": \"" + password + "\", \"expires_in\": " + expiresInSeconds + "}", String.class); + } + public static void revokeToken(String token) throws IOException { HttpUtil.post(URI.create("http://mockoauth:8091/admin/revocations"), null, @@ -266,6 +195,13 @@ public static void revokeToken(String token) throws IOException { "{\"token\": \"" + token + "\"}", String.class); } + public static void addGrantsForToken(String token, String grants) throws IOException { + HttpUtil.post(URI.create("http://mockoauth:8091/admin/grants_map"), + null, + "application/json", + "{\"token\": \"" + token + "\", \"grants\": " + grants + "}", String.class); + } + public static Metrics reloadMetrics() throws IOException { return getPrometheusMetrics(URI.create("http://kafka:9404/metrics")); } @@ -284,7 +220,7 @@ public static String 
getProjectRoot() { static class MockBearerTokenWithPayload implements BearerTokenWithPayload { private final TokenInfo ti; - private Object payload; + private JsonNode payload; MockBearerTokenWithPayload(TokenInfo ti) { if (ti == null) { @@ -294,12 +230,12 @@ static class MockBearerTokenWithPayload implements BearerTokenWithPayload { } @Override - public Object getPayload() { + public JsonNode getPayload() { return payload; } @Override - public void setPayload(Object value) { + public void setPayload(JsonNode value) { payload = value; } @@ -309,7 +245,7 @@ public Set getGroups() { } @Override - public ObjectNode getJSON() { + public ObjectNode getClaimsJSON() { return ti.payload(); } diff --git a/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/ConnectTimeoutTests.java b/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/ConnectTimeoutTests.java index 48398dfb..8e7db881 100644 --- a/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/ConnectTimeoutTests.java +++ b/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/ConnectTimeoutTests.java @@ -92,7 +92,7 @@ private void connectAuthServerWithTimeout() throws Exception { Properties producerProps = buildProducerConfigOAuthBearer(kafkaBootstrap, oauthConfig); long start = System.currentTimeMillis(); - try (KafkaProducer p = new KafkaProducer<>(producerProps)) { + try (KafkaProducer ignored = new KafkaProducer<>(producerProps)) { Assert.fail("Should fail with KafkaException"); } catch (Exception e) { @@ -102,6 +102,7 @@ private void connectAuthServerWithTimeout() throws Exception { Assert.assertTrue("Unexpected diff: " + diff, diff > timeoutOverride * 1000 && diff < timeoutOverride * 1000 + 1000); } finally { changeAuthServerMode("token", "mode_200"); + System.clearProperty("oauth.read.timeout.seconds"); } } diff --git a/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/KeycloakAuthorizerTest.java b/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/KeycloakAuthorizerTest.java index 9947828b..040c8a24 100644 --- a/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/KeycloakAuthorizerTest.java +++ b/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/KeycloakAuthorizerTest.java @@ -4,30 +4,60 @@ */ package io.strimzi.testsuite.oauth.mockoauth; +import io.strimzi.kafka.oauth.common.BearerTokenWithPayload; +import io.strimzi.kafka.oauth.common.ConfigException; import io.strimzi.kafka.oauth.common.OAuthAuthenticator; import io.strimzi.kafka.oauth.common.PrincipalExtractor; import io.strimzi.kafka.oauth.common.SSLUtil; import io.strimzi.kafka.oauth.common.TokenInfo; +import io.strimzi.kafka.oauth.server.JaasServerOauthValidatorCallbackHandler; import io.strimzi.kafka.oauth.server.OAuthKafkaPrincipal; +import io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder; +import io.strimzi.kafka.oauth.server.ServerConfig; +import io.strimzi.kafka.oauth.server.TestTokenFactory; +import io.strimzi.kafka.oauth.server.authorizer.AuthzConfig; +import io.strimzi.kafka.oauth.server.authorizer.KeycloakAuthorizer; import io.strimzi.kafka.oauth.server.authorizer.KeycloakRBACAuthorizer; +import io.strimzi.kafka.oauth.server.authorizer.TestAuthzUtil; +import io.strimzi.testsuite.oauth.common.TestUtil; import org.apache.kafka.common.acl.AclOperation; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePattern; import 
org.apache.kafka.common.resource.ResourceType; +import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; import org.apache.kafka.common.security.auth.KafkaPrincipal; +import org.apache.kafka.common.security.auth.SaslAuthenticationContext; import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; +import org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback; +import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerSaslServer; import org.apache.kafka.server.authorizer.Action; import org.apache.kafka.server.authorizer.AuthorizableRequestContext; import org.apache.kafka.server.authorizer.AuthorizationResult; -import org.jetbrains.annotations.NotNull; import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import javax.security.auth.callback.Callback; +import javax.security.auth.callback.UnsupportedCallbackException; +import javax.security.auth.login.AppConfigurationEntry; import java.io.IOException; import java.net.URI; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.LinkedList; import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeoutException; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import static io.strimzi.testsuite.oauth.mockoauth.Common.addGrantsForToken; import static io.strimzi.testsuite.oauth.mockoauth.Common.changeAuthServerMode; import static io.strimzi.testsuite.oauth.mockoauth.Common.createOAuthClient; import static io.strimzi.testsuite.oauth.mockoauth.Common.createOAuthUser; @@ -36,38 +66,855 @@ public class KeycloakAuthorizerTest { - String clientCli = "kafka-cli"; + private static final Logger LOG = LoggerFactory.getLogger(KeycloakAuthorizerTest.class); - String userAlice = "alice"; - String userAlicePass = "alice-password"; + static final int LOOP_PAUSE_MS = 1000; + static final int TIMEOUT_SECONDS = 30; - String truststorePath = "../docker/target/kafka/certs/ca-truststore.p12"; - String truststorePass = "changeit"; - String tokenEndpoint = "https://mockoauth:8090/failing_token"; + static final String LOG_PATH = "target/test.log"; - public void doHttpRetriesTest() throws IOException { + static final String CLIENT_CLI = "kafka-cli"; + static final String USER_ALICE = "alice"; + static final String USER_ALICE_PASS = "alice-password"; + + final static String TRUSTSTORE_PATH = "../docker/target/kafka/certs/ca-truststore.p12"; + final static String TRUSTSTORE_PASS = "changeit"; + + final static String TOKEN_ENDPOINT = "https://mockoauth:8090/token"; + final static String FAILING_TOKEN_ENDPOINT = "https://mockoauth:8090/failing_token"; + + final static String GRANTS_ENDPOINT = "https://mockoauth:8090/grants"; + + final static String JWKS_ENDPOINT = "https://mockoauth:8090/jwks"; + final static String VALID_ISSUER_URI = "https://mockoauth:8090"; + + + final static String CLIENT_SRV = "kafka"; + final static String CLIENT_SRV_SECRET = "kafka-secret"; + + public static void staticInit() throws IOException { // create a client for resource server - String clientSrv = "kafka"; - String clientSrvSecret = "kafka-secret"; - createOAuthClient(clientSrv, clientSrvSecret); + createOAuthClient(CLIENT_SRV, CLIENT_SRV_SECRET); // create a client for user's client 
agent - createOAuthClient(clientCli, ""); + createOAuthClient(CLIENT_CLI, ""); // create a user alice - createOAuthUser(userAlice, userAlicePass); + createOAuthUser(USER_ALICE, USER_ALICE_PASS); + } + + public void doTests() throws Exception { + doConfigTests(); + doMalformedGrantsTests(); + doGrantsSemanticEqualsTest(); + doHttpRetriesTest(); + doConcurrentGrantsRefreshTest(); + doGrantsGCTests(); + doGrants403Test(); + doGrants401Test(); + doSingletonTest(); + } + + void doGrants401Test() throws IOException, InterruptedException, TimeoutException { + logStart("KeycloakAuthorizerTest :: Grants 401 (invalid token) Test"); + + changeAuthServerMode("token", "MODE_200"); + + LogLineReader logReader = new LogLineReader(LOG_PATH); + logReader.readNext(); + + List lines; + HashMap props = configureAuthorizer(); + props.put(AuthzConfig.STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI, "https://mockoauth:8090/grants"); + props.put(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS, "4"); + + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + authorizer.configure(props); + + // authentication + TokenInfo tokenInfo = login(TOKEN_ENDPOINT, USER_ALICE, USER_ALICE_PASS, 1); + + // simulate an authenticated session + changeAuthServerMode("jwks", "MODE_200"); + + // configure the authentication handler + AuthenticateCallbackHandler authHandler = configureJwtSignatureValidator(); + + LOG.info("Authenticate (validate) as alice"); + OAuthKafkaPrincipal principal = authenticate(authHandler, tokenInfo); + + + // authorization + AuthorizableRequestContext ctx = newAuthorizableRequestContext(principal); + + List actions = new ArrayList<>(); + actions.add(new Action( + AclOperation.CREATE, + new ResourcePattern(ResourceType.TOPIC, "my-topic", PatternType.LITERAL), + 1, true, true)); + + List result = authorizer.authorize(ctx, actions); + Assert.assertNotNull("Authorizer has to return non-null", result); + Assert.assertEquals("Authorizer has to return as many results as it received inputs", actions.size(), result.size()); + Assert.assertEquals("Authorizer should return ALLOWED", AuthorizationResult.ALLOWED, result.get(0)); + + lines = logReader.readNext(); + Assert.assertTrue("Saving non-null grants", checkLogForRegex(lines, "Saving non-null grants")); + + // Switch grants endpoint to 401 mode + changeAuthServerMode("grants", "MODE_401"); + + LOG.info("Waiting for: Done refreshing"); // Make sure to not repeat the below condition in the string here + lines = waitFor(logReader, "Done refreshing grants"); + Assert.assertTrue("Failed to fetch", checkLogForRegex(lines, "Failed to fetch grants .* status 401")); + Assert.assertTrue("Removed user from grants cache", checkLogForRegex(lines, "Removed user from grants cache: alice")); + Assert.assertTrue("Removed invalid session", checkLogForRegex(lines, "Removed invalid session from sessions map \\(userId: alice")); + + } finally { + changeAuthServerMode("grants", "MODE_200"); + } + + TestAuthzUtil.clearKeycloakAuthorizerService(); + } + + void doGrants403Test() throws IOException { + logStart("KeycloakAuthorizerTest :: Grants 403 (no policies for user) Test"); + + // Switch grants endpoint to 403 mode + changeAuthServerMode("grants", "MODE_403"); + + LogLineReader logReader = new LogLineReader(LOG_PATH); + logReader.readNext(); + + List lines; + HashMap props = configureAuthorizer(); + props.put(AuthzConfig.STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI, "https://mockoauth:8090/grants"); + + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) {
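+ // The grants endpoint is in 403 mode: judging by the assertions below, the authorizer is expected to treat 403 as 'no grants for this user', save empty grants ({}) and deny the action rather than fail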
+ authorizer.configure(props); + + TokenInfo tokenInfo = login(FAILING_TOKEN_ENDPOINT, USER_ALICE, USER_ALICE_PASS, 1); + KafkaPrincipal principal = new OAuthKafkaPrincipal(KafkaPrincipal.USER_TYPE, USER_ALICE, new Common.MockBearerTokenWithPayload(tokenInfo)); + AuthorizableRequestContext ctx = newAuthorizableRequestContext(principal); + + List actions = new ArrayList<>(); + actions.add(new Action( + AclOperation.CREATE, + new ResourcePattern(ResourceType.TOPIC, "my-topic", PatternType.LITERAL), + 1, true, true)); + + List result = authorizer.authorize(ctx, actions); + Assert.assertNotNull("Authorizer has to return non-null", result); + Assert.assertEquals("Authorizer has to return as many results as it received inputs", actions.size(), result.size()); + Assert.assertEquals("Authorizer should return DENIED", AuthorizationResult.DENIED, result.get(0)); + + lines = logReader.readNext(); + + Assert.assertTrue("Saving non-null grants", checkLogForRegex(lines, "Saving non-null grants")); + Assert.assertTrue("grants for user: {}", checkLogForRegex(lines, "grants for .*: \\{\\}")); + + } finally { + changeAuthServerMode("grants", "MODE_200"); + } + + TestAuthzUtil.clearKeycloakAuthorizerService(); + } + + void doHttpRetriesTest() throws IOException { + logStart("KeycloakAuthorizerTest :: Grants HTTP Retries Tests"); changeAuthServerMode("token", "MODE_200"); changeAuthServerMode("failing_token", "MODE_400"); + HashMap props = configureAuthorizer(); + props.put(AuthzConfig.STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI, "https://mockoauth:8090/failing_grants"); + + try (KeycloakRBACAuthorizer authorizer = new KeycloakRBACAuthorizer()) { + authorizer.configure(props); + + try { + login(FAILING_TOKEN_ENDPOINT, USER_ALICE, USER_ALICE_PASS, 0); + + Assert.fail("Should have failed while logging in with password"); + + } catch (Exception expected) { + login(FAILING_TOKEN_ENDPOINT, USER_ALICE, USER_ALICE_PASS, 0); + } + + // Now try again + TokenInfo tokenInfo = login(FAILING_TOKEN_ENDPOINT, USER_ALICE, USER_ALICE_PASS, 1); + KafkaPrincipal principal = new OAuthKafkaPrincipal(KafkaPrincipal.USER_TYPE, USER_ALICE, new Common.MockBearerTokenWithPayload(tokenInfo)); + AuthorizableRequestContext ctx = newAuthorizableRequestContext(principal); + + List actions = new ArrayList<>(); + actions.add(new Action( + AclOperation.CREATE, + new ResourcePattern(ResourceType.TOPIC, "my-topic", PatternType.LITERAL), + 1, true, true)); + + List result = authorizer.authorize(ctx, actions); + Assert.assertNotNull("Authorizer has to return non-null", result); + Assert.assertEquals("Authorizer has to return as many results as it received inputs", actions.size(), result.size()); + Assert.assertEquals("Authorizer should return ALLOWED", AuthorizationResult.ALLOWED, result.get(0)); + } + + TestAuthzUtil.clearKeycloakAuthorizerService(); + } + + /** + * This test makes sure that concurrent threads needing grants for the same user, when those grants are not yet available in the grants cache, + * result in a single request to the Keycloak server, with the second thread waiting for the result and reusing it.
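+ * The log assertions below depend on this: one thread should log that it is fetching grants from Keycloak, while the other should log that it is waiting on another thread to get them.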
+ * + * @throws IOException If an exception occurs during I/O operation + * @throws ExecutionException If an exception occurs during job execution + * @throws InterruptedException If test is interrupted + */ + void doConcurrentGrantsRefreshTest() throws IOException, ExecutionException, InterruptedException { + logStart("KeycloakAuthorizerTest :: Concurrent Grants Refresh Tests"); + + // create a test user 'user1' + String userOne = "user1"; + String userOnePass = "user1-password"; + createOAuthUser(userOne, userOnePass); + + changeAuthServerMode("token", "MODE_200"); + changeAuthServerMode("failing_token", "MODE_400"); + + // grants endpoint has to be configured to respond with a 1s delay + changeAuthServerMode("grants", "MODE_200_DELAYED"); + + // one test uses KeycloakAuthorizer not configured with 'strimzi.authorization.reuse.grants' + HashMap props = configureAuthorizer(); + runConcurrentFetchGrantsTest(props, true, userOne, userOnePass); + + // another test uses KeycloakAuthorizer configured with it set to 'false' + props.put("strimzi.authorization.reuse.grants", "false"); + runConcurrentFetchGrantsTest(props, false, userOne, userOnePass); + } + + private void runConcurrentFetchGrantsTest(HashMap props, boolean withReuse, String user, String userPass) throws IOException, ExecutionException, InterruptedException { + + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + authorizer.configure(props); + + LogLineReader logReader = new LogLineReader(LOG_PATH); + List lines = logReader.readNext(); + + if (withReuse) { + Assert.assertTrue("reuseGrants should be true", checkLogForRegex(lines, "reuseGrants: true")); + } else { + Assert.assertTrue("reuseGrants should be false", checkLogForRegex(lines, "reuseGrants: false")); + } + + TokenInfo tokenInfo = login(FAILING_TOKEN_ENDPOINT, user, userPass, 1); + OAuthKafkaPrincipal principal = new OAuthKafkaPrincipal(KafkaPrincipal.USER_TYPE, user, TestTokenFactory.newTokenForUser(tokenInfo)); + + addGrantsForToken(tokenInfo.token(), "[{\"scopes\":[\"Delete\",\"Write\",\"Describe\",\"Read\",\"Alter\",\"Create\",\"DescribeConfigs\",\"AlterConfigs\"],\"rsid\":\"ca6f195f-dbdc-48b7-a953-8e441d17f7fa\",\"rsname\":\"Topic:my-topic*\"}," + + "{\"scopes\":[\"IdempotentWrite\"],\"rsid\":\"73af36e6-5796-43e7-8129-b57fe0bac7a1\",\"rsname\":\"Cluster:*\"}," + + "{\"scopes\":[\"Describe\",\"Read\"],\"rsid\":\"141c56e8-1a85-40f3-b38a-f490bad76913\",\"rsname\":\"Group:*\"}]"); + + AuthorizableRequestContext ctx = newAuthorizableRequestContext(principal); + + + ExecutorService executorService = Executors.newFixedThreadPool(2); + + try { + Assert.assertNull("payload should not be set yet: " + principal.getJwt().getPayload(), principal.getJwt().getPayload()); + + // In two parallel threads invoke authorize() passing the same packaged principal + Future> future = submitAuthorizationCall(authorizer, ctx, executorService, "my-topic"); + Future> future2 = submitAuthorizationCall(authorizer, ctx, executorService, "my-topic-2"); + + List result = future.get(); + List result2 = future2.get(); + + // Check log output for signs of the semaphore doing its job + // and only fetching the grants once, with the other thread reusing the fetched grants + // The behaviour is the same whether reuseGrants is true or false, because concurrent requests for the same user are treated as occurring at the same time + lines = logReader.readNext(); + + Assert.assertEquals("One thread fetches grants", 1, countLogForRegex(lines, "Fetching grants from Keycloak for user user1"));
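+ // Both calls use the same principal and thus the same session, so the grants fetched by the first thread are expected to be reused by the second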
+ Assert.assertEquals("One thread waits", 1, countLogForRegex(lines, "Waiting on another thread to get grants")); + Assert.assertEquals("One grants fetch", 1, countLogForRegex(lines, "Response body for POST https://mockoauth:8090/grants")); + + // Check the authorization result + Assert.assertEquals("One result for my-topic action", 1, result.size()); + Assert.assertEquals("One result for my-topic-2 action", 1, result2.size()); + Assert.assertEquals("my-topic ALLOWED", AuthorizationResult.ALLOWED, result.get(0)); + Assert.assertEquals("my-topic-2 ALLOWED", AuthorizationResult.ALLOWED, result2.get(0)); + + if (!withReuse) { + // Check that the BearerTokenWithJsonPayload has a payload + // The payload only gets set in the no-reuse regime, so that it can be determined whether grants were refreshed for the session + Assert.assertNotNull("payload should be set now: " + principal.getJwt().getPayload(), principal.getJwt().getPayload()); + } + + + // Perform another authorization - grants should be retrieved directly from grants cache, + // even if reuseGrants is false, because it's not a new session anymore + future = submitAuthorizationCall(authorizer, ctx, executorService, "x-topic-1"); + result = future.get(); + + // check log from last checkpoint on + lines = logReader.readNext(); + + Assert.assertEquals("No grants fetch", 0, countLogForRegex(lines, "Response body for POST https://mockoauth:8090/grants")); + + // Check the authorization result + Assert.assertEquals("One result for x-topic-1 action", 1, result.size()); + Assert.assertEquals("x-topic-1 DENIED", AuthorizationResult.DENIED, result.get(0)); + + // Create a new Principal object for the same user + // Perform another authorization - grants should be fetched if reuseGrants is false + principal = new OAuthKafkaPrincipal(KafkaPrincipal.USER_TYPE, user, TestTokenFactory.newTokenForUser(tokenInfo)); + ctx = newAuthorizableRequestContext(principal); + + future = submitAuthorizationCall(authorizer, ctx, executorService, "x-topic-2"); + result = future.get(); + + lines = logReader.readNext(); + if (!withReuse) { + // Check that grants have been fetched + Assert.assertEquals("Grants fetched", 1, countLogForRegex(lines, "Response body for POST https://mockoauth:8090/grants")); + } else { + // Check that grants have not been fetched again + Assert.assertEquals("Grants not fetched", 0, countLogForRegex(lines, "Response body for POST https://mockoauth:8090/grants")); + } + + // Check the authorization result + Assert.assertEquals("One result for x-topic-2 action", 1, result.size()); + Assert.assertEquals("x-topic-2 DENIED", AuthorizationResult.DENIED, result.get(0)); + } finally { + executorService.shutdown(); + } + } + + TestAuthzUtil.clearKeycloakAuthorizerService(); + } + + void doConfigTests() throws IOException { + logStart("KeycloakAuthorizerTest :: Config Tests"); + + HashMap config = new HashMap<>(); + + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + try { + authorizer.configure(config); + Assert.fail("Should have failed"); + } catch (ConfigException e) { + Assert.assertTrue("'principal.builder.class' is missing", e.getMessage().contains("requires io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder as 'principal.builder.class'")); + } + } + config.put("principal.builder.class", "io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder"); + + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + try { + authorizer.configure(config); + Assert.fail("Should have failed"); + } catch (ConfigException e) { +
Assert.assertTrue("'strimzi.authorization.token.endpoint.uri' is missing", e.getMessage().contains("Token Endpoint ('strimzi.authorization.token.endpoint.uri') not set")); + } + } + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_TOKEN_ENDPOINT_URI, GRANTS_ENDPOINT); + + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + try { + authorizer.configure(config); + Assert.fail("Should have failed"); + } catch (ConfigException e) { + Assert.assertTrue("'strimzi.authorization.client.id' is missing", e.getMessage().contains("client id ('strimzi.authorization.client.id') not set")); + } + } + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_CLIENT_ID, "kafka"); + + LogLineReader logReader = new LogLineReader(LOG_PATH); + + // Position to the end of the existing log file + logReader.readNext(); + + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + authorizer.configure(config); + } + + List lines = logReader.readNext(); + + // Check the defaults + Assert.assertEquals("tokenEndpointUri: https://mockoauth:8090/grants", 1, countLogForRegex(lines, "tokenEndpointUri: https://mockoauth:8090/grants")); + Assert.assertEquals("clientId: kafka", 1, countLogForRegex(lines, "clientId: kafka")); + Assert.assertEquals("sslSocketFactory: null", 1, countLogForRegex(lines, "sslSocketFactory: null")); + Assert.assertEquals("hostnameVerifier: null", 1, countLogForRegex(lines, "hostnameVerifier: null")); + Assert.assertEquals("clusterName: kafka-cluster", 1, countLogForRegex(lines, "clusterName: kafka-cluster")); + Assert.assertEquals("delegateToKafkaACL: false", 1, countLogForRegex(lines, "delegateToKafkaACL: false")); + Assert.assertEquals("superUsers: []", 1, countLogForRegex(lines, "superUsers: \\[\\]")); + Assert.assertEquals("grantsRefreshPeriodSeconds: 60", 1, countLogForRegex(lines, "grantsRefreshPeriodSeconds: 60")); + Assert.assertEquals("grantsRefreshPoolSize: 5", 1, countLogForRegex(lines, "grantsRefreshPoolSize: 5")); + Assert.assertEquals("grantsMaxIdleTimeSeconds: 300", 1, countLogForRegex(lines, "grantsMaxIdleTimeSeconds: 300")); + Assert.assertEquals("httpRetries: 0", 1, countLogForRegex(lines, "httpRetries: 0")); + Assert.assertEquals("reuseGrants: true", 1, countLogForRegex(lines, "reuseGrants: true")); + Assert.assertEquals("connectTimeoutSeconds: 60", 1, countLogForRegex(lines, "connectTimeoutSeconds: 60")); + Assert.assertEquals("readTimeoutSeconds: 60", 1, countLogForRegex(lines, "readTimeoutSeconds: 60")); + Assert.assertEquals("enableMetrics: false", 1, countLogForRegex(lines, "enableMetrics: false")); + Assert.assertEquals("gcPeriodSeconds: 300", 1, countLogForRegex(lines, "gcPeriodSeconds: 300")); + + // Custom config + + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_KAFKA_CLUSTER_NAME, "cluster1"); + config.put("super.users", "User:admin;User:service-account-kafka"); + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS, "180"); + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_POOL_SIZE, "3"); + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_MAX_IDLE_TIME_SECONDS, "30"); + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_HTTP_RETRIES, "2"); + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_REUSE_GRANTS, "false"); + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_CONNECT_TIMEOUT_SECONDS, "15"); + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_READ_TIMEOUT_SECONDS, "15"); + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_ENABLE_METRICS, "true"); + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_GC_PERIOD_SECONDS, "60"); + + try 
(KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + try { + authorizer.configure(config); + Assert.fail("Should have failed"); + } catch (ConfigException e) { + Assert.assertEquals("Only one instance per JVM", "Only one authorizer configuration per JVM is supported", e.getMessage()); + } + } + + TestAuthzUtil.clearKeycloakAuthorizerService(); + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + authorizer.configure(config); + } + + lines = logReader.readNext(); + + Assert.assertEquals("clusterName: cluster1", 1, countLogForRegex(lines, "clusterName: cluster1")); + Assert.assertEquals("superUsers: ['User:admin', 'User:service-account-kafka']", 1, countLogForRegex(lines, "superUsers: \\['User:admin', 'User:service-account-kafka'\\]")); + Assert.assertEquals("grantsRefreshPeriodSeconds: 180", 1, countLogForRegex(lines, "grantsRefreshPeriodSeconds: 180")); + Assert.assertEquals("grantsRefreshPoolSize: 3", 1, countLogForRegex(lines, "grantsRefreshPoolSize: 3")); + Assert.assertEquals("grantsMaxIdleTimeSeconds: 30", 1, countLogForRegex(lines, "grantsMaxIdleTimeSeconds: 30")); + Assert.assertEquals("httpRetries: 2", 1, countLogForRegex(lines, "httpRetries: 2")); + Assert.assertEquals("reuseGrants: false", 1, countLogForRegex(lines, "reuseGrants: false")); + Assert.assertEquals("connectTimeoutSeconds: 15", 1, countLogForRegex(lines, "connectTimeoutSeconds: 15")); + Assert.assertEquals("readTimeoutSeconds: 15", 1, countLogForRegex(lines, "readTimeoutSeconds: 15")); + Assert.assertEquals("enableMetrics: true", 1, countLogForRegex(lines, "enableMetrics: true")); + Assert.assertEquals("gcPeriodSeconds: 60", 1, countLogForRegex(lines, "gcPeriodSeconds: 60")); + + + // test gcPeriodSeconds set to 0 + config.put(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_GC_PERIOD_SECONDS, "0"); + + TestAuthzUtil.clearKeycloakAuthorizerService(); + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + authorizer.configure(config); + } + + lines = logReader.readNext(); + + Assert.assertEquals("gcPeriodSeconds invalid value: 0", 1, countLogForRegex(lines, "'strimzi.authorization.grants.gc.period.seconds' set to invalid value: 0, using the default value: 300 seconds")); + Assert.assertEquals("gcPeriodSeconds: 300", 1, countLogForRegex(lines, "gcPeriodSeconds: 300")); + + TestAuthzUtil.clearKeycloakAuthorizerService(); + } + + void doGrantsGCTests() throws Exception { + logStart("KeycloakAuthorizerTest :: Grants Garbage Collection Tests"); + + // Hold on to the created principals to prevent JVM gc() clearing the sessions + List principals = new LinkedList<>(); + + // make sure the token endpoint works fine + changeAuthServerMode("token", "MODE_200"); + + // Make sure grants endpoint is set to normal mode 200 + changeAuthServerMode("grants", "MODE_200"); + + String userOne = "gcUser1"; + String userOnePass = "gcUser1-password"; + createOAuthUser(userOne, userOnePass); + + // Set gcPeriodSeconds to 3 seconds + HashMap props = configureAuthorizer(); + props.put(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS, "5"); + props.put(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_GC_PERIOD_SECONDS, "3"); + + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + authorizer.configure(props); + + // Perform authentications and authorizations with different access tokens for the same user + // That will populate the grantsCache map with a single entry and update already existing entry with the latest access token + + // authentication + TokenInfo tokenInfo = 
login(TOKEN_ENDPOINT, userOne, userOnePass, 0); + + // simulate an authenticated session + changeAuthServerMode("jwks", "MODE_200"); + + // configure the authentication handler + AuthenticateCallbackHandler authHandler = configureJwtSignatureValidator(); + + + // open the test log for incremental reading + LogLineReader logReader = new LogLineReader(LOG_PATH); + + // wait for a grants gc run on 0 users + LOG.info("Waiting for: active users count: 0"); // Make sure to not repeat the below condition in the string here + waitFor(logReader, "Grants gc: active users count: 0"); + + LOG.info("Authenticate (validate) as gcUser1"); + OAuthKafkaPrincipal principal = authenticate(authHandler, tokenInfo); + principals.add(principal); + + // authorization + + AuthorizableRequestContext authzContext = newAuthorizableRequestContext(principal); + + List actions = new ArrayList<>(); + actions.add(new Action( + AclOperation.CREATE, + new ResourcePattern(ResourceType.TOPIC, "my-topic", PatternType.LITERAL), + 1, true, true)); + + // perform authorization for the session + LOG.info("Call authorize() as gcUser1"); + List result = authorizer.authorize(authzContext, actions); + Assert.assertEquals("Authz result: ALLOWED", AuthorizationResult.ALLOWED, result.get(0)); + + // check the logs for updated access token + List lines = logReader.readNext(); + Assert.assertEquals("Fetch grants", 1, countLogForRegex(lines, "Fetching grants from Keycloak for user gcUser1")); + + + String userTwo = "gcUser2"; + String userTwoPass = "gcUser2-password"; + createOAuthUser(userTwo, userTwoPass, 8); + + // Create a short-lived access token for a new user that only has an 8-second lifetime + // This allows us to test that the triggered gc job cleans the cache once the token has expired + tokenInfo = login(TOKEN_ENDPOINT, userTwo, userTwoPass, 0); + + LOG.info("Authenticate (validate) gcUser2"); + principal = authenticate(authHandler, tokenInfo); + principals.add(principal); + + LOG.info("Waiting for: active users count: 2, grantsCache size before: 1, grantsCache size after: 1"); // Make sure to not repeat the below condition in the string here + // wait for a grants gc run on 2 users + waitFor(logReader, "Grants gc: active users count: 2, grantsCache size before: 1, grantsCache size after: 1"); + + + authzContext = newAuthorizableRequestContext(principal); + + LOG.info("Call authorize() as gcUser2"); + result = authorizer.authorize(authzContext, actions); + Assert.assertEquals("Authz result: ALLOWED", AuthorizationResult.ALLOWED, result.get(0)); + + // wait for a grants gc run on 2 users and two grants cache entries + LOG.info("Waiting for: active users count: 2, grantsCache size before: 2, grantsCache size after: 2"); // Make sure to not repeat the below condition in the string here + waitFor(logReader, "Grants gc: active users count: 2, grantsCache size before: 2, grantsCache size after: 2"); + + + // now wait for the token of gcUser2 to expire + LOG.info("Waiting for: active users count: 1, grantsCache size before: 2, grantsCache size after: 1"); // Make sure to not repeat the below condition in the string here + waitFor(logReader, "Grants gc: active users count: 1, grantsCache size before: 2, grantsCache size after: 1"); + + + // authorization should now fail since the token has expired + LOG.info("Authorize another action for gcUser2"); + result = authorizer.authorize(authzContext, actions); + Assert.assertEquals("Authz result: DENIED", AuthorizationResult.DENIED, result.get(0)); + }
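+ + // Only one authorizer configuration per JVM is supported (see doConfigTests), so clear the shared authorizer service state before the next test + 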
TestAuthzUtil.clearKeycloakAuthorizerService(); + } + + private OAuthKafkaPrincipal authenticate(AuthenticateCallbackHandler authHandler, TokenInfo tokenInfo) throws IOException { + + // authenticate with the raw access token, get BearerTokenWithPayload that represents the session + BearerTokenWithPayload tokenWithPayload = null; + try { + tokenWithPayload = (BearerTokenWithPayload) authenticate(authHandler, tokenInfo.token()); + } catch (UnsupportedCallbackException e) { + Assert.fail("Test error - should never happen: " + e); + } + + // mock up the authentication workflow part that creates the OAuthKafkaPrincipal + OAuthKafkaPrincipalBuilder principalBuilder = new OAuthKafkaPrincipalBuilder(); + principalBuilder.configure(new HashMap<>()); + + OAuthBearerSaslServer saslServer = mock(OAuthBearerSaslServer.class); + when(saslServer.getMechanismName()).thenReturn("OAUTHBEARER"); + when(saslServer.getAuthorizationID()).thenReturn(tokenInfo.principal()); + when(saslServer.getNegotiatedProperty("OAUTHBEARER.token")).thenReturn(tokenWithPayload); + + SaslAuthenticationContext authContext = mock(SaslAuthenticationContext.class); + when(authContext.server()).thenReturn(saslServer); + + return (OAuthKafkaPrincipal) principalBuilder.build(authContext); + } + + private AuthenticateCallbackHandler configureJwtSignatureValidator() { + JaasServerOauthValidatorCallbackHandler authHandler = new JaasServerOauthValidatorCallbackHandler(); + Map jaasProps = new HashMap<>(); + jaasProps.put(ServerConfig.OAUTH_JWKS_ENDPOINT_URI, JWKS_ENDPOINT); + jaasProps.put(ServerConfig.OAUTH_SSL_TRUSTSTORE_LOCATION, TRUSTSTORE_PATH); + jaasProps.put(ServerConfig.OAUTH_SSL_TRUSTSTORE_PASSWORD, TRUSTSTORE_PASS); + jaasProps.put(ServerConfig.OAUTH_VALID_ISSUER_URI, VALID_ISSUER_URI); + jaasProps.put(ServerConfig.OAUTH_CHECK_ACCESS_TOKEN_TYPE, "false"); + + Map configs = new HashMap<>(); + authHandler.configure(configs, "OAUTHBEARER", Collections.singletonList(new AppConfigurationEntry("server", AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, jaasProps))); + return authHandler; + } + + private OAuthBearerToken authenticate(AuthenticateCallbackHandler callbackHandler, String accessToken) throws IOException, UnsupportedCallbackException { + OAuthBearerValidatorCallback callback = new OAuthBearerValidatorCallback(accessToken); + Callback[] callbacks = new Callback[] {callback}; + callbackHandler.handle(callbacks); + return callback.token(); + } + + /** + * Test for the handling of improperly configured Authorization Services + */ + void doMalformedGrantsTests() throws IOException, InterruptedException, TimeoutException { + logStart("KeycloakAuthorizerTest :: Malformed Grants Tests"); + + // make sure the token endpoint works fine + changeAuthServerMode("token", "MODE_200"); + + // login as some user - alice in our case, and get the token + TokenInfo tokenInfo = login(TOKEN_ENDPOINT, USER_ALICE, USER_ALICE_PASS, 0); + OAuthKafkaPrincipal principal = new OAuthKafkaPrincipal(KafkaPrincipal.USER_TYPE, USER_ALICE, TestTokenFactory.newTokenForUser(tokenInfo)); + + // Mistyped resource type 'Topc' instead of 'Topic' + addGrantsForToken(tokenInfo.token(), "[{\"scopes\":[\"Delete\",\"Write\",\"Describe\",\"Read\",\"Alter\",\"Create\",\"DescribeConfigs\",\"AlterConfigs\"],\"rsid\":\"ca6f195f-dbdc-48b7-a953-8e441d17f7fa\",\"rsname\":\"Topc:my-topic*\"}," + + 
"{\"scopes\":[\"IdempotentWrite\"],\"rsid\":\"73af36e6-5796-43e7-8129-b57fe0bac7a1\",\"rsname\":\"Cluster:*\"}," + + "{\"scopes\":[\"Describe\",\"Read\"],\"rsid\":\"141c56e8-1a85-40f3-b38a-f490bad76913\",\"rsname\":\"Group:*\"}]"); + + List actions = new ArrayList<>(); + actions.add(new Action( + AclOperation.CREATE, + new ResourcePattern(ResourceType.TOPIC, "my-topic", PatternType.LITERAL), + 1, true, true)); + + LogLineReader logReader = new LogLineReader(LOG_PATH); + // seek to the end of log file + logReader.readNext(); + + HashMap props = configureAuthorizer(); + props.put(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS, "2"); + + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + authorizer.configure(props); + + + AuthorizableRequestContext authzContext = newAuthorizableRequestContext(principal); + + LOG.info("Call authorize() - test grants record with invalid resource type 'Topc'"); + List result = authorizer.authorize(authzContext, actions); + Assert.assertEquals("Authz result: DENIED", AuthorizationResult.DENIED, result.get(0)); + + // This is a first authorize() call on the KeycloakAuthorizer -> the grantsCache is empty + LOG.info("Waiting for: unsupported segment type: Topc"); // Make sure to not repeat the below condition in the string here + waitFor(logReader, "Failed to parse .* unsupported segment type: Topc"); + + + // malformed resource spec - no ':' in Topic;my-topic* + addGrantsForToken(tokenInfo.token(), "[{\"scopes\":[\"Delete\",\"Write\",\"Describe\",\"Read\",\"Alter\",\"Create\",\"DescribeConfigs\",\"AlterConfigs\"],\"rsid\":\"ca6f195f-dbdc-48b7-a953-8e441d17f7fa\",\"rsname\":\"Topic;my-topic*\"}," + + "{\"scopes\":[\"IdempotentWrite\"],\"rsid\":\"73af36e6-5796-43e7-8129-b57fe0bac7a1\",\"rsname\":\"Cluster:*\"}," + + "{\"scopes\":[\"Describe\",\"Read\"],\"rsid\":\"141c56e8-1a85-40f3-b38a-f490bad76913\",\"rsname\":\"Group:*\"}]"); + + // wait for grants refresh + LOG.info("Waiting for: Done refreshing grants"); // Make sure to not repeat the below condition in the string here + waitFor(logReader, "Response body .*Topic;my-topic"); + + + LOG.info("Call authorize() - test grants record with malformed resource spec 'Topic;my-topic*' (no ':')"); + result = authorizer.authorize(authzContext, actions); + Assert.assertEquals("Authz result: DENIED", AuthorizationResult.DENIED, result.get(0)); + + LOG.info("Waiting for: doesn't follow TYPE:NAME pattern"); // Make sure to not repeat the below condition in the string here + waitFor(logReader, "part doesn't follow TYPE:NAME pattern"); + + // malformed resource spec - '*' not at the end in 'Topic:*-topic' + addGrantsForToken(tokenInfo.token(), "[{\"scopes\":[\"Delete\",\"Write\",\"Describe\",\"Read\",\"Alter\",\"Create\",\"DescribeConfigs\",\"AlterConfigs\"],\"rsid\":\"ca6f195f-dbdc-48b7-a953-8e441d17f7fa\",\"rsname\":\"Topic:*-topic\"}," + + "{\"scopes\":[\"IdempotentWrite\"],\"rsid\":\"73af36e6-5796-43e7-8129-b57fe0bac7a1\",\"rsname\":\"Cluster:*\"}," + + "{\"scopes\":[\"Describe\",\"Read\"],\"rsid\":\"141c56e8-1a85-40f3-b38a-f490bad76913\",\"rsname\":\"Group:*\"}]"); + + // wait for grants refresh + LOG.info("Waiting for: Done refreshing grants"); // Make sure to not repeat the below condition in the string here + waitFor(logReader, "Response body .*Topic:\\*-topic"); + + LOG.info("Call authorize() - test grants record with malformed resource spec 'Topic:*-topic' ('*' only interpreted as asterisk at the end of resource spec)"); + result = authorizer.authorize(authzContext, actions); + 
Assert.assertEquals("Authz result: DENIED", AuthorizationResult.DENIED, result.get(0)); + + + // unknown scope - 'Crate' (should be 'Create') + addGrantsForToken(tokenInfo.token(), "[{\"scopes\":[\"Delete\",\"Write\",\"Describe\",\"Read\",\"Alter\",\"Crate\",\"DescribeConfigs\",\"AlterConfigs\"],\"rsid\":\"ca6f195f-dbdc-48b7-a953-8e441d17f7fa\",\"rsname\":\"Topic:my-topic*\"}," + + "{\"scopes\":[\"IdempotentWrite\"],\"rsid\":\"73af36e6-5796-43e7-8129-b57fe0bac7a1\",\"rsname\":\"Cluster:*\"}," + + "{\"scopes\":[\"Describe\",\"Read\"],\"rsid\":\"141c56e8-1a85-40f3-b38a-f490bad76913\",\"rsname\":\"Group:*\"}]"); + + // wait for grants refresh + LOG.info("Waiting for: Done refreshing grants"); // Make sure to not repeat the below condition in the string here + waitFor(logReader, "Response body .*Crate"); + + LOG.info("Call authorize() - test grants record with unknown / invalid scope 'Crate' (it should be 'Create')"); + result = authorizer.authorize(authzContext, actions); + Assert.assertEquals("Authz result: DENIED", AuthorizationResult.DENIED, result.get(0)); + } + + TestAuthzUtil.clearKeycloakAuthorizerService(); + } + + /** + * Test for the semantic equality of grants - treat JSON array as a set + */ + void doGrantsSemanticEqualsTest() throws Exception { + logStart("KeycloakAuthorizerTest :: Test semantic equality of grants (JSON array -> set)"); + + String grants1 = "[{\"scopes\":[\"Write\",\"Describe\"],\"rsid\":\"a92a050d-b4f1-4e91-ac65-dbe10f17ee36\",\"rsname\":\"Topic:x_*\"},{\"scopes\":[\"Read\",\"Write\",\"Delete\",\"Describe\",\"Alter\",\"Create\",\"DescribeConfigs\",\"AlterConfigs\"],\"rsid\":\"5098d4c2-0e7f-4121-8fd2-9964111370a2\",\"rsname\":\"Topic:a_*\"},{\"scopes\":[\"Read\",\"Describe\"],\"rsid\":\"916ed684-5bd0-42b1-b7ab-3b23448d3f50\",\"rsname\":\"Group:a_*\"},{\"scopes\":[\"IdempotentWrite\"],\"rsid\":\"d71850c3-5ea8-47ef-9a61-4fab9c1df363\",\"rsname\":\"kafka-cluster:my-cluster,Cluster:*\"}]"; + String grants2 = "[{\"scopes\":[\"Write\",\"Describe\",\"Create\"],\"rsid\":\"a92a050d-b4f1-4e91-ac65-dbe10f17ee36\",\"rsname\":\"Topic:x_*\"},{\"scopes\":[\"Read\",\"Write\",\"Delete\",\"Describe\",\"Alter\",\"Create\",\"DescribeConfigs\",\"AlterConfigs\"],\"rsid\":\"5098d4c2-0e7f-4121-8fd2-9964111370a2\",\"rsname\":\"Topic:a_*\"},{\"scopes\":[\"Read\",\"Describe\"],\"rsid\":\"916ed684-5bd0-42b1-b7ab-3b23448d3f50\",\"rsname\":\"Group:a_*\"},{\"scopes\":[\"IdempotentWrite\"],\"rsid\":\"d71850c3-5ea8-47ef-9a61-4fab9c1df363\",\"rsname\":\"kafka-cluster:my-cluster,Cluster:*\"}]"; + String grants3 = "[{\"scopes\":[\"Read\",\"Write\",\"Delete\",\"Describe\",\"Alter\",\"Create\",\"DescribeConfigs\",\"AlterConfigs\"],\"rsid\":\"5098d4c2-0e7f-4121-8fd2-9964111370a2\",\"rsname\":\"Topic:a_*\"},{\"scopes\":[\"Write\",\"Describe\",\"Create\"],\"rsid\":\"a92a050d-b4f1-4e91-ac65-dbe10f17ee36\",\"rsname\":\"Topic:x_*\"},{\"scopes\":[\"Read\",\"Describe\"],\"rsid\":\"916ed684-5bd0-42b1-b7ab-3b23448d3f50\",\"rsname\":\"Group:a_*\"},{\"scopes\":[\"IdempotentWrite\"],\"rsid\":\"d71850c3-5ea8-47ef-9a61-4fab9c1df363\",\"rsname\":\"kafka-cluster:my-cluster,Cluster:*\"}]"; + + // make sure the token endpoint works fine + changeAuthServerMode("token", "MODE_200"); + + // login as some user - alice in our case, and get the token + TokenInfo tokenInfo = login(TOKEN_ENDPOINT, USER_ALICE, USER_ALICE_PASS, 0); + OAuthKafkaPrincipal principal = new OAuthKafkaPrincipal(KafkaPrincipal.USER_TYPE, USER_ALICE, TestTokenFactory.newTokenForUser(tokenInfo)); + + // Set grants for the user to `grants1` + 
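// (the mock server maps this raw access token to the given grants JSON via its /admin/grants_map endpoint - see Common.addGrantsForToken) + 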
addGrantsForToken(tokenInfo.token(), grants1); + + List actions = new ArrayList<>(); + actions.add(new Action( + AclOperation.CREATE, + new ResourcePattern(ResourceType.TOPIC, "x_topic", PatternType.LITERAL), + 1, true, true)); + + LogLineReader logReader = new LogLineReader(LOG_PATH); + // seek to the end of log file + logReader.readNext(); + + HashMap props = configureAuthorizer(); + props.put(AuthzConfig.STRIMZI_AUTHORIZATION_GRANTS_REFRESH_PERIOD_SECONDS, "2"); + + try (KeycloakAuthorizer authorizer = new KeycloakAuthorizer()) { + authorizer.configure(props); + + + AuthorizableRequestContext authzContext = newAuthorizableRequestContext(principal); + + LOG.info("Call authorize() - create x_topic"); + List result = authorizer.authorize(authzContext, actions); + Assert.assertEquals("Authz result: DENIED", AuthorizationResult.DENIED, result.get(0)); + + // Check log for 'Saving non-null grants for user: alice' + LOG.info("Waiting for: Saving non-null grants"); // Make sure to not repeat the below condition in the string here + waitFor(logReader, "Saving non-null grants for user: alice"); + + // set grants for the user to `grants2` which are semantically different from `grants1` + addGrantsForToken(tokenInfo.token(), grants2); + + // wait for the refresh job to fetch the new grants + // Check log for 'Grants have changed for user: alice' + LOG.info("Waiting for: Grants have changed"); // Make sure to not repeat the below condition in the string here + waitFor(logReader, "Grants have changed for user: alice"); + + // set grants for the user to `grants3` which are semantically equal to `grants2` + addGrantsForToken(tokenInfo.token(), grants3); + + // wait for the refresh job to fetch the new grants + LOG.info("Waiting for: Refreshing grants to start"); // Make sure to not repeat the below condition in the string here + waitFor(logReader, "Refreshing authorization grants"); + + // Check log for 'Done refreshing grants' and there should be no preceding line containing 'Grants have changed for user' + // wait for refresh grants job to complete + LOG.info("Waiting for grants refresh to complete"); // Make sure to not repeat the below condition in the string here + List lines = waitFor(logReader, "Done refreshing grants"); + + int matchCount = countLogForRegex(lines, "Grants have changed for user"); + Assert.assertEquals("Grants have changed again ?!?", 0, matchCount); + + result = authorizer.authorize(authzContext, actions); + Assert.assertEquals("Authz result: ALLOWED", AuthorizationResult.ALLOWED, result.get(0)); + } + + TestAuthzUtil.clearKeycloakAuthorizerService(); + } + + void doSingletonTest() throws Exception { + logStart("KeycloakAuthorizerTest :: Ensure that multiple instantiated KeycloakAuthorizers share a single instance of KeycloakRBACAuthorizer"); + + HashMap config = configureAuthorizer(); + + LogLineReader logReader = new LogLineReader(LOG_PATH); + logReader.readNext(); + + List lines; + try (KeycloakAuthorizer authorizer1 = new KeycloakAuthorizer(); + KeycloakAuthorizer authorizer2 = new KeycloakAuthorizer()) { + + authorizer1.configure(config); + authorizer2.configure(config); + + lines = logReader.readNext(); + + List keycloakAuthorizerLines = lines.stream().filter(line -> line.contains("Configured KeycloakAuthorizer@")).collect(Collectors.toList()); + List keycloakRBACAuthorizerLines = lines.stream().filter(line -> line.contains("Configured KeycloakRBACAuthorizer@")).collect(Collectors.toList()); + + Assert.assertEquals("Configured KeycloakAuthorizer", 2, 
keycloakAuthorizerLines.size()); + Assert.assertEquals("Configured KeycloakRBACAuthorizer", 1, keycloakRBACAuthorizerLines.size()); + } + + TestAuthzUtil.clearKeycloakAuthorizerService(); + } + + private List waitFor(LogLineReader logReader, String condition) throws TimeoutException, InterruptedException { + List result = new ArrayList<>(); + TestUtil.waitForCondition(() -> { + try { + List lines = logReader.readNext(); + int lineNum = findFirstMatchingInLog(lines, condition); + if (lineNum >= 0) { + result.addAll(lines.subList(0, lineNum)); + return true; + } + result.addAll(lines); + return false; + } catch (Exception e) { + throw new RuntimeException("Failed to read log", e); + } + }, LOOP_PAUSE_MS, TIMEOUT_SECONDS); + + return result; + } + + private static Future> submitAuthorizationCall(KeycloakAuthorizer authorizer, AuthorizableRequestContext ctx, ExecutorService executorService, String topic) { + return executorService.submit(() -> { + List actions = new ArrayList<>(); + actions.add(new Action( + AclOperation.CREATE, + new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL), + 1, true, true)); + + return authorizer.authorize(ctx, actions); + }); + } + + private HashMap configureAuthorizer() { + return configureAuthorizer(CLIENT_SRV, CLIENT_SRV_SECRET, TRUSTSTORE_PATH, TRUSTSTORE_PASS); + } + + static HashMap configureAuthorizer(String clientSrv, String clientSrvSecret, String trustStorePath, String trustStorePass) { HashMap props = new HashMap<>(); - props.put("strimzi.authorization.ssl.truststore.location", truststorePath); - props.put("strimzi.authorization.ssl.truststore.password", truststorePass); + props.put("strimzi.authorization.ssl.truststore.location", trustStorePath); + props.put("strimzi.authorization.ssl.truststore.password", trustStorePass); props.put("strimzi.authorization.ssl.truststore.type", "pkcs12"); props.put("strimzi.authorization.enable.metrics", "true"); - props.put("strimzi.authorization.token.endpoint.uri", "https://mockoauth:8090/failing_grants"); + props.put("strimzi.authorization.token.endpoint.uri", "https://mockoauth:8090/grants"); props.put("strimzi.authorization.client.id", clientSrv); props.put("strimzi.authorization.client.secret", clientSrvSecret); props.put("strimzi.authorization.kafka.cluster.name", "my-cluster"); @@ -79,57 +926,65 @@ public void doHttpRetriesTest() throws IOException { props.put("strimzi.authorization.http.retries", "1"); props.put("super.users", "User:admin;User:service-account-kafka"); props.put("principal.builder.class", "io.strimzi.kafka.oauth.server.OAuthKafkaPrincipalBuilder"); + return props; + } - KeycloakRBACAuthorizer authorizer = new KeycloakRBACAuthorizer(); - authorizer.configure(props); - - - SecurityProtocol protocol = SecurityProtocol.SASL_PLAINTEXT; - - try { - login(tokenEndpoint, userAlice, userAlicePass, 0); - - Assert.fail("Should have failed while logging in with password"); - - } catch (Exception expected) { - login(tokenEndpoint, userAlice, userAlicePass, 0); + static int countLogForRegex(List log, String regex) { + int count = 0; + Pattern pattern = Pattern.compile(prepareRegex(regex)); + for (String line: log) { + if (pattern.matcher(line).matches()) { + count += 1; + } } + return count; + } - // Now try again - TokenInfo tokenInfo = login(tokenEndpoint, userAlice, userAlicePass, 1); + static int findFirstMatchingInLog(List log, String regex) { + int lineNum = 0; + Pattern pattern = Pattern.compile(prepareRegex(regex)); + for (String line: log) { + if (pattern.matcher(line).matches()) { + 
return lineNum; + } + lineNum++; + } + return -1; + } - KafkaPrincipal principal = new OAuthKafkaPrincipal(KafkaPrincipal.USER_TYPE, "alice", new Common.MockBearerTokenWithPayload(tokenInfo)); + static String prepareRegex(String regex) { + String prefix = regex.startsWith("^") ? "" : ".*"; + String suffix = regex.endsWith("$") ? "" : ".*"; + return prefix + regex + suffix; + } + static boolean checkLogForRegex(List log, String regex) { + Pattern pattern = Pattern.compile(prepareRegex(regex)); + for (String line: log) { + if (pattern.matcher(line).matches()) { + return true; + } + } + return false; + } + private AuthorizableRequestContext newAuthorizableRequestContext(KafkaPrincipal principal) { AuthorizableRequestContext ctx = mock(AuthorizableRequestContext.class); when(ctx.listenerName()).thenReturn("JWT"); - when(ctx.securityProtocol()).thenReturn(protocol); + when(ctx.securityProtocol()).thenReturn(SecurityProtocol.SASL_PLAINTEXT); when(ctx.principal()).thenReturn(principal); - when(ctx.clientId()).thenReturn(clientCli); - - - List actions = new ArrayList<>(); - actions.add(new Action( - AclOperation.CREATE, - new ResourcePattern(ResourceType.TOPIC, "my-topic", PatternType.LITERAL), - 1, true, true)); - - - List result = authorizer.authorize(ctx, actions); - Assert.assertNotNull("Authorizer has to return non-null", result); - Assert.assertTrue("Authorizer has to return as many results as it received inputs", result.size() == actions.size()); - Assert.assertEquals("Authorizer should return ALLOWED", AuthorizationResult.ALLOWED, result.get(0)); + when(ctx.clientId()).thenReturn(CLIENT_CLI); + return ctx; } - @NotNull private TokenInfo login(String tokenEndpoint, String user, String userPass, int retries) throws IOException { return OAuthAuthenticator.loginWithPassword( URI.create(tokenEndpoint), - SSLUtil.createSSLFactory(truststorePath, null, truststorePass, null, null), + SSLUtil.createSSLFactory(TRUSTSTORE_PATH, null, TRUSTSTORE_PASS, null, null), null, user, userPass, - clientCli, + CLIENT_CLI, null, true, new PrincipalExtractor(), @@ -140,4 +995,13 @@ private TokenInfo login(String tokenEndpoint, String user, String userPass, int retries, 0); } + + private void logStart(String msg) { + System.out.println(); + System.out.println("======== " + msg); + System.out.println(); + + // Log to file as well for better readability + LOG.info("======== " + msg); + } } diff --git a/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/LogLineReader.java b/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/LogLineReader.java new file mode 100644 index 00000000..24bb26d1 --- /dev/null +++ b/testsuite/mockoauth-tests/src/test/java/io/strimzi/testsuite/oauth/mockoauth/LogLineReader.java @@ -0,0 +1,30 @@ +/* + * Copyright 2017-2023, Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.testsuite.oauth.mockoauth; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.List; + +/** + * A very inefficient but simple and good-enough-for-tests implementation of incrementally reading a log file and serving content as lines + */ +public class LogLineReader { + + private final String logPath; + private int logLineOffset = 0; + + LogLineReader(String logPath) { + this.logPath = logPath; + } + + List readNext() throws IOException { + List lines = Files.readAllLines(Paths.get(logPath)); + List result = lines.subList(logLineOffset, lines.size()); + logLineOffset = lines.size(); + return result; + } +} diff --git a/testsuite/mockoauth-tests/src/test/resources/simplelogger.properties b/testsuite/mockoauth-tests/src/test/resources/simplelogger.properties index ecd50f33..775890a1 100644 --- a/testsuite/mockoauth-tests/src/test/resources/simplelogger.properties +++ b/testsuite/mockoauth-tests/src/test/resources/simplelogger.properties @@ -1,2 +1,6 @@ +# These tests rely on file logging to the specified location and on io.strimzi=TRACE level logs +org.slf4j.simpleLogger.logFile=target/test.log +org.slf4j.simpleLogger.showDateTime=true +org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS org.slf4j.simpleLogger.log.org.apache.kafka=OFF -org.slf4j.simpleLogger.log.io.strimzi=OFF \ No newline at end of file +org.slf4j.simpleLogger.log.io.strimzi=TRACE \ No newline at end of file diff --git a/testsuite/pom.xml b/testsuite/pom.xml index 6e3a4448..86c9c829 100644 --- a/testsuite/pom.xml +++ b/testsuite/pom.xml @@ -17,6 +17,8 @@ mockoauth-tests keycloak-auth-tests keycloak-authz-tests + keycloak-authz-kraft-tests + keycloak-authz-zk-tests keycloak-errors-tests hydra-test @@ -40,13 +42,13 @@ 0.40.2 1.17.3 - 4.13.1 + 4.13.2 4.7.0 2.13.4 1.0.0-SNAPSHOT .. - quay.io/strimzi/kafka:0.33.2-kafka-3.4.0 + quay.io/strimzi/kafka:0.34.0-kafka-3.4.0 @@ -63,6 +65,11 @@ common ${project.version} + + io.strimzi.oauth.testsuite + keycloak-authz-tests + ${project.version} + org.testcontainers testcontainers-bom @@ -75,6 +82,16 @@ jackson-annotations ${jackson.annotation.version} + + junit + junit + ${version.junit} + + + com.github.spotbugs + spotbugs-annotations + ${spotbugs.version} + @@ -397,7 +414,7 @@ kafka-3_4_0 - quay.io/strimzi/kafka:0.33.2-kafka-3.4.0 + quay.io/strimzi/kafka:0.34.0-kafka-3.4.0