From b507ebed18a94be8467b54b9b9a881f911c280b4 Mon Sep 17 00:00:00 2001 From: Steven Martin Date: Mon, 13 Jan 2025 07:37:46 -0500 Subject: [PATCH 01/15] Fix spelling in log messages (#50974) --- lib/backend/memory/memory.go | 2 +- lib/events/complete.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/backend/memory/memory.go b/lib/backend/memory/memory.go index cd00a6bb6efaa..4adb2b0779803 100644 --- a/lib/backend/memory/memory.go +++ b/lib/backend/memory/memory.go @@ -472,7 +472,7 @@ func (m *Memory) removeExpired() int { } m.heap.PopEl() m.tree.Delete(item) - m.logger.DebugContext(m.ctx, "Removed expired item.", "key", item.Key.String(), "epiry", item.Expires) + m.logger.DebugContext(m.ctx, "Removed expired item.", "key", item.Key.String(), "expiry", item.Expires) removed++ event := backend.Event{ diff --git a/lib/events/complete.go b/lib/events/complete.go index de9022391a533..881e02e80ce62 100644 --- a/lib/events/complete.go +++ b/lib/events/complete.go @@ -271,7 +271,7 @@ func (u *UploadCompleter) CheckUploads(ctx context.Context) error { continue } - log.DebugContext(ctx, "foud upload with parts", "part_count", len(parts)) + log.DebugContext(ctx, "found upload with parts", "part_count", len(parts)) if err := u.cfg.Uploader.CompleteUpload(ctx, upload, parts); trace.IsNotFound(err) { log.DebugContext(ctx, "Upload not found, moving on to next upload", "error", err) From 8e513b4643176d3a5285c4c05b75e8c95c2b6d44 Mon Sep 17 00:00:00 2001 From: Steven Martin Date: Mon, 13 Jan 2025 07:37:55 -0500 Subject: [PATCH 02/15] Fix spelling for log messages (#50917) --- tool/tsh/common/git_config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tool/tsh/common/git_config.go b/tool/tsh/common/git_config.go index 89771735b30b3..6b703af251cee 100644 --- a/tool/tsh/common/git_config.go +++ b/tool/tsh/common/git_config.go @@ -124,11 +124,11 @@ func (c *gitConfigCommand) doUpdate(cf *CLIConf) error { for _, url := range strings.Split(urls, "\n") { u, err := parseGitSSHURL(url) if err != nil { - logger.DebugContext(cf.Context, "Skippig URL", "error", err, "url", url) + logger.DebugContext(cf.Context, "Skipping URL", "error", err, "url", url) continue } if !u.isGitHub() { - logger.DebugContext(cf.Context, "Skippig non-GitHub host", "host", u.Host) + logger.DebugContext(cf.Context, "Skipping non-GitHub host", "host", u.Host) continue } From 3c87bea3cf768258c9ac059e84553e43918e266f Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Mon, 13 Jan 2025 11:54:42 -0500 Subject: [PATCH 03/15] Fix incorrectly named dynamo events scaling policy (#50907) The read scaling policy name was incorrectly changed to match the write scaling policy. This prevents upgrading from a v16 cluster with a dynamo audit backend configured to use autoscaling to v17. To resolve, when conflicts are found the incorrectly named scaling policy is removed, and replaced by the correctly named one. 
--- lib/events/dynamoevents/dynamoevents.go | 49 +++++++++++++++++-------- 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/lib/events/dynamoevents/dynamoevents.go b/lib/events/dynamoevents/dynamoevents.go index 5c2036336b278..9e8a18c92b4c8 100644 --- a/lib/events/dynamoevents/dynamoevents.go +++ b/lib/events/dynamoevents/dynamoevents.go @@ -437,14 +437,14 @@ func (l *Log) configureTable(ctx context.Context, svc *applicationautoscaling.Cl readDimension: autoscalingtypes.ScalableDimensionDynamoDBTableReadCapacityUnits, writeDimension: autoscalingtypes.ScalableDimensionDynamoDBTableWriteCapacityUnits, resourceID: fmt.Sprintf("table/%s", l.Tablename), - readPolicy: fmt.Sprintf("%s-write-target-tracking-scaling-policy", l.Tablename), + readPolicy: fmt.Sprintf("%s-read-target-tracking-scaling-policy", l.Tablename), writePolicy: fmt.Sprintf("%s-write-target-tracking-scaling-policy", l.Tablename), }, { readDimension: autoscalingtypes.ScalableDimensionDynamoDBIndexReadCapacityUnits, writeDimension: autoscalingtypes.ScalableDimensionDynamoDBIndexWriteCapacityUnits, resourceID: fmt.Sprintf("table/%s/index/%s", l.Tablename, indexTimeSearchV2), - readPolicy: fmt.Sprintf("%s/index/%s-write-target-tracking-scaling-policy", l.Tablename, indexTimeSearchV2), + readPolicy: fmt.Sprintf("%s/index/%s-read-target-tracking-scaling-policy", l.Tablename, indexTimeSearchV2), writePolicy: fmt.Sprintf("%s/index/%s-write-target-tracking-scaling-policy", l.Tablename, indexTimeSearchV2), }, } @@ -472,20 +472,39 @@ func (l *Log) configureTable(ctx context.Context, svc *applicationautoscaling.Cl // Define scaling policy. Defines the ratio of {read,write} consumed capacity to // provisioned capacity DynamoDB will try and maintain. - if _, err := svc.PutScalingPolicy(ctx, &applicationautoscaling.PutScalingPolicyInput{ - PolicyName: aws.String(p.readPolicy), - PolicyType: autoscalingtypes.PolicyTypeTargetTrackingScaling, - ResourceId: aws.String(p.resourceID), - ScalableDimension: p.readDimension, - ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb, - TargetTrackingScalingPolicyConfiguration: &autoscalingtypes.TargetTrackingScalingPolicyConfiguration{ - PredefinedMetricSpecification: &autoscalingtypes.PredefinedMetricSpecification{ - PredefinedMetricType: autoscalingtypes.MetricTypeDynamoDBReadCapacityUtilization, + for i := 0; i < 2; i++ { + if _, err := svc.PutScalingPolicy(ctx, &applicationautoscaling.PutScalingPolicyInput{ + PolicyName: aws.String(p.readPolicy), + PolicyType: autoscalingtypes.PolicyTypeTargetTrackingScaling, + ResourceId: aws.String(p.resourceID), + ScalableDimension: p.readDimension, + ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb, + TargetTrackingScalingPolicyConfiguration: &autoscalingtypes.TargetTrackingScalingPolicyConfiguration{ + PredefinedMetricSpecification: &autoscalingtypes.PredefinedMetricSpecification{ + PredefinedMetricType: autoscalingtypes.MetricTypeDynamoDBReadCapacityUtilization, + }, + TargetValue: aws.Float64(l.ReadTargetValue), }, - TargetValue: aws.Float64(l.ReadTargetValue), - }, - }); err != nil { - return trace.Wrap(convertError(err)) + }); err != nil { + // The read policy name was accidentally changed to match the write policy in 17.0.0-17.1.4. This + // prevented upgrading a cluster with autoscaling enabled from v16 to v17. 
To resolve in + // a backwards compatible way, the read policy name was restored; however, any new clusters that + // were created between 17.0.0 and 17.1.4 need to have the misnamed policy deleted and recreated + // with the correct name. + if i == 1 || !strings.Contains(err.Error(), "ValidationException: Only one TargetTrackingScaling policy for a given metric specification is allowed.") { + return trace.Wrap(convertError(err)) + } + + l.logger.DebugContext(ctx, "Fixing incorrectly named scaling policy") + if _, err := svc.DeleteScalingPolicy(ctx, &applicationautoscaling.DeleteScalingPolicyInput{ + PolicyName: aws.String(p.writePolicy), + ResourceId: aws.String(p.resourceID), + ScalableDimension: p.readDimension, + ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb, + }); err != nil { + return trace.Wrap(convertError(err)) + } + } } if _, err := svc.PutScalingPolicy(ctx, &applicationautoscaling.PutScalingPolicyInput{ From 3653dda62282a3632229db7c10420064fefb552c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Cie=C5=9Blak?= Date: Mon, 13 Jan 2025 17:55:41 +0100 Subject: [PATCH 04/15] Add basic support for target port in gateways in Connect (#50912) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update type for targetSubresourceName on DocumentGateway The way DocumentsService.createGatewayDocument is implemented means that the targetSubresourceName property is always present, but it can be undefined. * Use "local port" instead of "port" in DocumentGatewayApp * Rewrite gateway FieldInputs to use styled components * Update comments in protos * useGateway: Stabilize useAsync functions of ports * Add padding to menu label if it's first child * Add support for required prop to Input and FieldInput * Add UI for changing target port * ActionButtons: Show ports of multi-port apps when VNet is not supported Now that we have support for the target port in Connect's gateways, we can show the ports and then open a gateway for that specific port on click. * Add RWMutex to gateways * Clear app gateway cert on target port change * Remove gateways/app.LocalProxyURL It was used only in tests and it made sense only for web apps anyway. * TestTCP: Close connections when test ends * Create context with timeout in testGatewayCertRenewal …instead of in each function that uses it. * Add tests for changing the target port of a TCP gateway * Parallelize app gateway tests within MFA/non-MFA groups * Make testGatewayConnection take ctx as first arg This will be needed in tests that check target port validation.
* Validate target port of app gateways * Increase timeouts in app gateway tests * Change icons from medium to small * Use consistent spacing in AppGateway * Add godoc for ValidateTargetPort * Add retry with relogin to change target port --- .../go/teleport/lib/teleterm/v1/gateway.pb.go | 5 +- .../ts/teleport/lib/teleterm/v1/gateway_pb.ts | 5 +- integration/appaccess/appaccess_test.go | 1 + integration/appaccess/pack.go | 56 ++++ integration/proxy/proxy_helpers.go | 53 +++- integration/proxy/proxy_test.go | 30 ++- integration/proxy/teleterm_test.go | 254 +++++++++++++++--- .../apiserver/handler/handler_gateways.go | 2 +- lib/teleterm/clusters/cluster_apps.go | 31 ++- lib/teleterm/clusters/cluster_gateways.go | 31 ++- lib/teleterm/daemon/daemon.go | 24 +- lib/teleterm/gateway/app.go | 11 - lib/teleterm/gateway/app_middleware.go | 6 +- lib/teleterm/gateway/base.go | 28 +- lib/teleterm/gateway/config.go | 5 + lib/teleterm/gateway/interfaces.go | 5 +- lib/teleterm/gateway/kube.go | 2 + proto/teleport/lib/teleterm/v1/gateway.proto | 5 +- web/packages/design/src/Input/Input.tsx | 3 + web/packages/design/src/Menu/Menu.story.tsx | 12 + web/packages/design/src/Menu/MenuItem.tsx | 30 ++- web/packages/design/src/keyframes.ts | 4 + .../components/FieldInput/FieldInput.tsx | 5 +- .../teleterm/src/services/tshd/testHelpers.ts | 2 +- .../src/ui/DocumentCluster/ActionButtons.tsx | 97 ++++--- .../src/ui/DocumentGateway/useGateway.ts | 63 +++-- .../src/ui/DocumentGatewayApp/AppGateway.tsx | 208 +++++++++++--- .../DocumentGatewayApp.story.tsx | 47 +++- .../DocumentGatewayApp/DocumentGatewayApp.tsx | 13 +- .../src/ui/TabHost/useTabShortcuts.test.tsx | 2 + .../src/ui/components/FieldInputs.tsx | 37 +-- .../src/ui/components/OfflineGateway.tsx | 4 +- .../documentsService/connectToApp.test.ts | 28 +- .../documentsService/connectToApp.ts | 20 +- .../documentsService/documentsService.test.ts | 2 + .../documentsService/types.ts | 6 +- 36 files changed, 904 insertions(+), 233 deletions(-) diff --git a/gen/proto/go/teleport/lib/teleterm/v1/gateway.pb.go b/gen/proto/go/teleport/lib/teleterm/v1/gateway.pb.go index 612afa9d557af..17d5fd1e02179 100644 --- a/gen/proto/go/teleport/lib/teleterm/v1/gateway.pb.go +++ b/gen/proto/go/teleport/lib/teleterm/v1/gateway.pb.go @@ -59,10 +59,11 @@ type Gateway struct { LocalAddress string `protobuf:"bytes,5,opt,name=local_address,json=localAddress,proto3" json:"local_address,omitempty"` // local_port is the gateway address on localhost LocalPort string `protobuf:"bytes,6,opt,name=local_port,json=localPort,proto3" json:"local_port,omitempty"` - // protocol is the gateway protocol + // protocol is the protocol used by the gateway. For databases, it matches the type of the + // database that the gateway targets. For apps, it's either "HTTP" or "TCP". Protocol string `protobuf:"bytes,7,opt,name=protocol,proto3" json:"protocol,omitempty"` // target_subresource_name points at a subresource of the remote resource, for example a - // database name on a database server. + // database name on a database server or a target port of a multi-port TCP app. TargetSubresourceName string `protobuf:"bytes,9,opt,name=target_subresource_name,json=targetSubresourceName,proto3" json:"target_subresource_name,omitempty"` // gateway_cli_client represents a command that the user can execute to connect to the resource // through the gateway. 
diff --git a/gen/proto/ts/teleport/lib/teleterm/v1/gateway_pb.ts b/gen/proto/ts/teleport/lib/teleterm/v1/gateway_pb.ts index f6523f7cc2210..194cc93867671 100644 --- a/gen/proto/ts/teleport/lib/teleterm/v1/gateway_pb.ts +++ b/gen/proto/ts/teleport/lib/teleterm/v1/gateway_pb.ts @@ -80,14 +80,15 @@ export interface Gateway { */ localPort: string; /** - * protocol is the gateway protocol + * protocol is the protocol used by the gateway. For databases, it matches the type of the + * database that the gateway targets. For apps, it's either "HTTP" or "TCP". * * @generated from protobuf field: string protocol = 7; */ protocol: string; /** * target_subresource_name points at a subresource of the remote resource, for example a - * database name on a database server. + * database name on a database server or a target port of a multi-port TCP app. * * @generated from protobuf field: string target_subresource_name = 9; */ diff --git a/integration/appaccess/appaccess_test.go b/integration/appaccess/appaccess_test.go index dffd5f8aa1912..8bb73e091754b 100644 --- a/integration/appaccess/appaccess_test.go +++ b/integration/appaccess/appaccess_test.go @@ -831,6 +831,7 @@ func TestTCP(t *testing.T) { conn, err := net.Dial("tcp", localProxyAddress) require.NoError(t, err) + defer conn.Close() buf := make([]byte, 1024) n, err := conn.Read(buf) diff --git a/integration/appaccess/pack.go b/integration/appaccess/pack.go index 5deabac208c4c..5a5de08691da4 100644 --- a/integration/appaccess/pack.go +++ b/integration/appaccess/pack.go @@ -184,6 +184,34 @@ func (p *Pack) RootAppPublicAddr() string { return p.rootAppPublicAddr } +func (p *Pack) RootTCPAppName() string { + return p.rootTCPAppName +} + +func (p *Pack) RootTCPMessage() string { + return p.rootTCPMessage +} + +func (p *Pack) RootTCPMultiPortAppName() string { + return p.rootTCPMultiPortAppName +} + +func (p *Pack) RootTCPMultiPortAppPortAlpha() int { + return p.rootTCPMultiPortAppPortAlpha +} + +func (p *Pack) RootTCPMultiPortMessageAlpha() string { + return p.rootTCPMultiPortMessageAlpha +} + +func (p *Pack) RootTCPMultiPortAppPortBeta() int { + return p.rootTCPMultiPortAppPortBeta +} + +func (p *Pack) RootTCPMultiPortMessageBeta() string { + return p.rootTCPMultiPortMessageBeta +} + func (p *Pack) RootAuthServer() *auth.Server { return p.rootCluster.Process.GetAuthServer() } @@ -200,6 +228,34 @@ func (p *Pack) LeafAppPublicAddr() string { return p.leafAppPublicAddr } +func (p *Pack) LeafTCPAppName() string { + return p.leafTCPAppName +} + +func (p *Pack) LeafTCPMessage() string { + return p.leafTCPMessage +} + +func (p *Pack) LeafTCPMultiPortAppName() string { + return p.leafTCPMultiPortAppName +} + +func (p *Pack) LeafTCPMultiPortAppPortAlpha() int { + return p.leafTCPMultiPortAppPortAlpha +} + +func (p *Pack) LeafTCPMultiPortMessageAlpha() string { + return p.leafTCPMultiPortMessageAlpha +} + +func (p *Pack) LeafTCPMultiPortAppPortBeta() int { + return p.leafTCPMultiPortAppPortBeta +} + +func (p *Pack) LeafTCPMultiPortMessageBeta() string { + return p.leafTCPMultiPortMessageBeta +} + func (p *Pack) LeafAuthServer() *auth.Server { return p.leafCluster.Process.GetAuthServer() } diff --git a/integration/proxy/proxy_helpers.go b/integration/proxy/proxy_helpers.go index 789ab0f4f577f..b5796110eb53d 100644 --- a/integration/proxy/proxy_helpers.go +++ b/integration/proxy/proxy_helpers.go @@ -28,6 +28,7 @@ import ( "net/http" "net/url" "path/filepath" + "strconv" "strings" "testing" "time" @@ -684,7 +685,7 @@ func mustFindKubePod(t *testing.T, tc 
*client.TeleportClient) { require.Equal(t, types.KindKubePod, response.Resources[0].Kind) } -func mustConnectDatabaseGateway(t *testing.T, _ *daemon.Service, gw gateway.Gateway) { +func mustConnectDatabaseGateway(ctx context.Context, t *testing.T, _ *daemon.Service, gw gateway.Gateway) { t.Helper() dbGateway, err := gateway.AsDatabase(gw) @@ -705,15 +706,15 @@ func mustConnectDatabaseGateway(t *testing.T, _ *daemon.Service, gw gateway.Gate require.NoError(t, client.Close()) } -// mustConnectAppGateway verifies that the gateway acts as an unauthenticated proxy that forwards -// requests to the app behind it. -func mustConnectAppGateway(t *testing.T, _ *daemon.Service, gw gateway.Gateway) { +// mustConnectWebAppGateway verifies that the gateway acts as an unauthenticated proxy that forwards +// requests to the web app behind it. +func mustConnectWebAppGateway(ctx context.Context, t *testing.T, _ *daemon.Service, gw gateway.Gateway) { t.Helper() - appGw, err := gateway.AsApp(gw) - require.NoError(t, err) + gatewayAddress := net.JoinHostPort(gw.LocalAddress(), gw.LocalPort()) + gatewayURL := fmt.Sprintf("http://%s", gatewayAddress) - req, err := http.NewRequest(http.MethodGet, appGw.LocalProxyURL(), nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, gatewayURL, nil) require.NoError(t, err) client := &http.Client{} @@ -724,6 +725,44 @@ func mustConnectAppGateway(t *testing.T, _ *daemon.Service, gw gateway.Gateway) require.Equal(t, http.StatusOK, resp.StatusCode) } +func makeMustConnectMultiPortTCPAppGateway(wantMessage string, otherTargetPort int, otherWantMessage string) testGatewayConnectionFunc { + return func(ctx context.Context, t *testing.T, d *daemon.Service, gw gateway.Gateway) { + t.Helper() + + gwURI := gw.URI().String() + originalTargetPort := gw.TargetSubresourceName() + makeMustConnectTCPAppGateway(wantMessage)(ctx, t, d, gw) + + _, err := d.SetGatewayTargetSubresourceName(ctx, gwURI, strconv.Itoa(otherTargetPort)) + require.NoError(t, err) + makeMustConnectTCPAppGateway(otherWantMessage)(ctx, t, d, gw) + + // Restore the original port, so that the next time the test calls this function after certs + // expire, wantMessage is going to match the port that the gateway points to. 
+ _, err = d.SetGatewayTargetSubresourceName(ctx, gwURI, originalTargetPort) + require.NoError(t, err) + makeMustConnectTCPAppGateway(wantMessage)(ctx, t, d, gw) + } +} + +func makeMustConnectTCPAppGateway(wantMessage string) testGatewayConnectionFunc { + return func(ctx context.Context, t *testing.T, _ *daemon.Service, gw gateway.Gateway) { + t.Helper() + + gatewayAddress := net.JoinHostPort(gw.LocalAddress(), gw.LocalPort()) + conn, err := net.Dial("tcp", gatewayAddress) + require.NoError(t, err) + defer conn.Close() + + buf := make([]byte, 1024) + n, err := conn.Read(buf) + require.NoError(t, err) + + resp := strings.TrimSpace(string(buf[:n])) + require.Equal(t, wantMessage, resp) + } +} + func kubeClientForLocalProxy(t *testing.T, kubeconfigPath, teleportCluster, kubeCluster string) *kubernetes.Clientset { t.Helper() diff --git a/integration/proxy/proxy_test.go b/integration/proxy/proxy_test.go index cf75bfd5f146b..262b8c1046726 100644 --- a/integration/proxy/proxy_test.go +++ b/integration/proxy/proxy_test.go @@ -54,6 +54,7 @@ import ( "github.com/gravitational/teleport/lib" "github.com/gravitational/teleport/lib/auth/testauthority" libclient "github.com/gravitational/teleport/lib/client" + "github.com/gravitational/teleport/lib/client/mfa" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/modules" "github.com/gravitational/teleport/lib/multiplexer" @@ -1315,18 +1316,29 @@ func TestALPNSNIProxyAppAccess(t *testing.T) { }) t.Run("teleterm app gateways cert renewal", func(t *testing.T) { - user, _ := pack.CreateUser(t) - tc := pack.MakeTeleportClient(t, user.GetName()) - - // test without per session MFA. - testTeletermAppGateway(t, pack, tc) + t.Run("without per-session MFA", func(t *testing.T) { + makeTC := func(t *testing.T) (*libclient.TeleportClient, mfa.WebauthnLoginFunc) { + user, _ := pack.CreateUser(t) + tc := pack.MakeTeleportClient(t, user.GetName()) + return tc, nil + } + testTeletermAppGateway(t, pack, makeTC) + testTeletermAppGatewayTargetPortValidation(t, pack, makeTC) + }) - t.Run("per session MFA", func(t *testing.T) { - // They update user's authentication to Webauthn so they must run after tests which do not use MFA. + t.Run("per-session MFA", func(t *testing.T) { + // They update the clusters' authentication to Webauthn so they must run after tests which do not use MFA. requireSessionMFAAuthPref(ctx, t, pack.RootAuthServer(), "127.0.0.1") requireSessionMFAAuthPref(ctx, t, pack.LeafAuthServer(), "127.0.0.1") - tc.WebauthnLogin = setupUserMFA(ctx, t, pack.RootAuthServer(), user.GetName(), "127.0.0.1") - testTeletermAppGateway(t, pack, tc) + makeTCAndWebauthnLogin := func(t *testing.T) (*libclient.TeleportClient, mfa.WebauthnLoginFunc) { + // Create a separate user for each test to enable parallel tests that use per-session MFA. + // See the comment for webauthnLogin in setupUserMFA for more details. 
+ user, _ := pack.CreateUser(t) + tc := pack.MakeTeleportClient(t, user.GetName()) + webauthnLogin := setupUserMFA(ctx, t, pack.RootAuthServer(), user.GetName(), "127.0.0.1") + return tc, webauthnLogin + } + testTeletermAppGateway(t, pack, makeTCAndWebauthnLogin) }) }) } diff --git a/integration/proxy/teleterm_test.go b/integration/proxy/teleterm_test.go index 67feeda87944c..18b0efd4884c7 100644 --- a/integration/proxy/teleterm_test.go +++ b/integration/proxy/teleterm_test.go @@ -19,9 +19,11 @@ package proxy import ( + "cmp" "context" "errors" "net" + "strconv" "sync" "sync/atomic" "testing" @@ -50,9 +52,9 @@ import ( "github.com/gravitational/teleport/lib/auth/mocku2f" wancli "github.com/gravitational/teleport/lib/auth/webauthncli" wantypes "github.com/gravitational/teleport/lib/auth/webauthntypes" - "github.com/gravitational/teleport/lib/client" libclient "github.com/gravitational/teleport/lib/client" "github.com/gravitational/teleport/lib/client/clientcache" + "github.com/gravitational/teleport/lib/client/mfa" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/service" "github.com/gravitational/teleport/lib/service/servicecfg" @@ -168,8 +170,8 @@ func testDBGatewayCertRenewal(ctx context.Context, t *testing.T, params dbGatewa TargetURI: params.databaseURI.String(), TargetUser: params.pack.Root.User.GetName(), }, - testGatewayConnectionFunc: mustConnectDatabaseGateway, - webauthnLogin: params.webauthnLogin, + testGatewayConnection: mustConnectDatabaseGateway, + webauthnLogin: params.webauthnLogin, generateAndSetupUserCreds: func(t *testing.T, tc *libclient.TeleportClient, ttl time.Duration) { creds, err := helpers.GenerateUserCreds(helpers.UserCredsRequest{ Process: params.pack.Root.Cluster.Process, @@ -184,7 +186,7 @@ func testDBGatewayCertRenewal(ctx context.Context, t *testing.T, params dbGatewa ) } -type testGatewayConnectionFunc func(*testing.T, *daemon.Service, gateway.Gateway) +type testGatewayConnectionFunc func(context.Context, *testing.T, *daemon.Service, gateway.Gateway) type generateAndSetupUserCredsFunc func(t *testing.T, tc *libclient.TeleportClient, ttl time.Duration) @@ -192,14 +194,19 @@ type gatewayCertRenewalParams struct { tc *libclient.TeleportClient albAddr string createGatewayParams daemon.CreateGatewayParams - testGatewayConnectionFunc testGatewayConnectionFunc + testGatewayConnection testGatewayConnectionFunc webauthnLogin libclient.WebauthnLoginFunc generateAndSetupUserCreds generateAndSetupUserCredsFunc + wantPromptMFACallCount int } func testGatewayCertRenewal(ctx context.Context, t *testing.T, params gatewayCertRenewalParams) { t.Helper() + // The test can potentially hang forever if something is wrong with the MFA prompt, add a timeout. + ctx, cancel := context.WithTimeout(ctx, time.Minute) + t.Cleanup(cancel) + tc := params.tc // Save the profile yaml file to disk as test helpers like helpers.NewClientWithCreds don't do @@ -273,7 +280,7 @@ func testGatewayCertRenewal(ctx context.Context, t *testing.T, params gatewayCer gateway, err := daemonService.CreateGateway(ctx, params.createGatewayParams) require.NoError(t, err, trace.DebugReport(err)) - params.testGatewayConnectionFunc(t, daemonService, gateway) + params.testGatewayConnection(ctx, t, daemonService, gateway) // Advance the fake clock to simulate the db cert expiry inside the middleware. 
fakeClock.Advance(time.Hour * 48) @@ -286,16 +293,17 @@ func testGatewayCertRenewal(ctx context.Context, t *testing.T, params gatewayCer // and then it will attempt to reissue the user cert using an expired user cert. // The mocked tshdEventsClient will issue a valid user cert, save it to disk, and the middleware // will let the connection through. - params.testGatewayConnectionFunc(t, daemonService, gateway) + params.testGatewayConnection(ctx, t, daemonService, gateway) require.Equal(t, uint32(1), tshdEventsService.reloginCallCount.Load(), "Unexpected number of calls to TSHDEventsClient.Relogin") require.Equal(t, uint32(0), tshdEventsService.sendNotificationCallCount.Load(), "Unexpected number of calls to TSHDEventsClient.SendNotification") if params.webauthnLogin != nil { - // There are two calls, one to issue the certs when creating the gateway and then another to - // reissue them after relogin. - require.Equal(t, uint32(2), tshdEventsService.promptMFACallCount.Load(), + // By default, there are two calls, one to issue the certs when creating the gateway and then + // another to reissue them after relogin. + wantCallCount := cmp.Or(params.wantPromptMFACallCount, 2) + require.Equal(t, uint32(wantCallCount), tshdEventsService.promptMFACallCount.Load(), "Unexpected number of calls to TSHDEventsClient.PromptMFA") } } @@ -474,9 +482,6 @@ func TestTeletermKubeGateway(t *testing.T) { t.Run("root with per-session MFA", func(t *testing.T) { profileName := mustGetProfileName(t, suite.root.Web) kubeURI := uri.NewClusterURI(profileName).AppendKube(kubeClusterName) - // The test can potentially hang forever if something is wrong with the MFA prompt, add a timeout. - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - t.Cleanup(cancel) testKubeGatewayCertRenewal(ctx, t, kubeGatewayCertRenewalParams{ suite: suite, kubeURI: kubeURI, @@ -486,9 +491,6 @@ func TestTeletermKubeGateway(t *testing.T) { t.Run("leaf with per-session MFA", func(t *testing.T) { profileName := mustGetProfileName(t, suite.root.Web) kubeURI := uri.NewClusterURI(profileName).AppendLeafCluster(suite.leaf.Secrets.SiteName).AppendKube(kubeClusterName) - // The test can potentially hang forever if something is wrong with the MFA prompt, add a timeout. 
- ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - t.Cleanup(cancel) testKubeGatewayCertRenewal(ctx, t, kubeGatewayCertRenewalParams{ suite: suite, kubeURI: kubeURI, @@ -523,7 +525,7 @@ func testKubeGatewayCertRenewal(ctx context.Context, t *testing.T, params kubeGa }) require.NoError(t, err) - testKubeConnection := func(t *testing.T, daemonService *daemon.Service, gw gateway.Gateway) { + testKubeConnection := func(ctx context.Context, t *testing.T, daemonService *daemon.Service, gw gateway.Gateway) { t.Helper() clientOnce.Do(func() { @@ -548,8 +550,8 @@ func testKubeGatewayCertRenewal(ctx context.Context, t *testing.T, params kubeGa createGatewayParams: daemon.CreateGatewayParams{ TargetURI: params.kubeURI.String(), }, - testGatewayConnectionFunc: testKubeConnection, - webauthnLogin: params.webauthnLogin, + testGatewayConnection: testKubeConnection, + webauthnLogin: params.webauthnLogin, generateAndSetupUserCreds: func(t *testing.T, tc *libclient.TeleportClient, ttl time.Duration) { creds, err := helpers.GenerateUserCreds(helpers.UserCredsRequest{ Process: params.suite.root.Process, @@ -614,6 +616,10 @@ func setupUserMFA(ctx context.Context, t *testing.T, authServer *auth.Server, us }) require.NoError(t, err) + // webauthnLogin is not safe for concurrent use, partly due to the implementation of device, but + // mostly because Teleport itself doesn't allow for more than one in-flight MFA challenge. This is + // an arbitrary limitation which in theory we could change. But for now, parallel tests that use + // webauthnLogin must use a separate user for each test and not trigger parallel MFA prompts. webauthnLogin := func(ctx context.Context, origin string, assertion *wantypes.CredentialAssertion, prompt wancli.LoginPrompt, opts *wancli.LoginOpts) (*proto.MFAAuthenticateResponse, string, error) { car, err := device.SignAssertion(origin, assertion) if err != nil { @@ -676,34 +682,210 @@ func requireSessionMFARole(ctx context.Context, t *testing.T, authServer *auth.S require.NoError(t, err) } -func testTeletermAppGateway(t *testing.T, pack *appaccess.Pack, tc *client.TeleportClient) { +type makeTCAndWebauthnLoginFunc func(t *testing.T) (*libclient.TeleportClient, mfa.WebauthnLoginFunc) + +func testTeletermAppGateway(t *testing.T, pack *appaccess.Pack, makeTCAndWebauthnLogin makeTCAndWebauthnLoginFunc) { ctx := context.Background() t.Run("root cluster", func(t *testing.T) { - profileName := mustGetProfileName(t, pack.RootWebAddr()) - appURI := uri.NewClusterURI(profileName).AppendApp(pack.RootAppName()) + t.Parallel() - // The test can potentially hang forever if something is wrong with the MFA prompt, add a timeout. 
- ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - t.Cleanup(cancel) - testAppGatewayCertRenewal(ctx, t, pack, tc, appURI) + t.Run("web app", func(t *testing.T) { + t.Parallel() + + profileName := mustGetProfileName(t, pack.RootWebAddr()) + appURI := uri.NewClusterURI(profileName).AppendApp(pack.RootAppName()) + + testAppGatewayCertRenewal(ctx, t, pack, makeTCAndWebauthnLogin, appURI) + }) + + t.Run("TCP app", func(t *testing.T) { + t.Parallel() + + profileName := mustGetProfileName(t, pack.RootWebAddr()) + appURI := uri.NewClusterURI(profileName).AppendApp(pack.RootTCPAppName()) + + tc, webauthnLogin := makeTCAndWebauthnLogin(t) + + testGatewayCertRenewal( + ctx, + t, + gatewayCertRenewalParams{ + tc: tc, + createGatewayParams: daemon.CreateGatewayParams{TargetURI: appURI.String()}, + testGatewayConnection: makeMustConnectTCPAppGateway(pack.RootTCPMessage()), + generateAndSetupUserCreds: pack.GenerateAndSetupUserCreds, + webauthnLogin: webauthnLogin, + }, + ) + }) + + t.Run("multi-port TCP app", func(t *testing.T) { + t.Parallel() + profileName := mustGetProfileName(t, pack.RootWebAddr()) + appURI := uri.NewClusterURI(profileName).AppendApp(pack.RootTCPMultiPortAppName()) + + tc, webauthnLogin := makeTCAndWebauthnLogin(t) + + testGatewayCertRenewal( + ctx, + t, + gatewayCertRenewalParams{ + tc: tc, + createGatewayParams: daemon.CreateGatewayParams{ + TargetURI: appURI.String(), + TargetSubresourceName: strconv.Itoa(pack.RootTCPMultiPortAppPortAlpha()), + }, + testGatewayConnection: makeMustConnectMultiPortTCPAppGateway( + pack.RootTCPMultiPortMessageAlpha(), pack.RootTCPMultiPortAppPortBeta(), pack.RootTCPMultiPortMessageBeta(), + ), + generateAndSetupUserCreds: pack.GenerateAndSetupUserCreds, + webauthnLogin: webauthnLogin, + // First MFA prompt is made when creating the gateway. Then makeMustConnectMultiPortTCPAppGateway + // changes the target port twice, which means two more prompts. + // + // Then testGatewayCertRenewal expires the certs and calls + // makeMustConnectMultiPortTCPAppGateway. The first connection refreshes the expired cert, + // then the function changes the target port twice again, resulting in two more prompts. + wantPromptMFACallCount: 3 + 3, + }, + ) + }) }) t.Run("leaf cluster", func(t *testing.T) { - profileName := mustGetProfileName(t, pack.RootWebAddr()) - appURI := uri.NewClusterURI(profileName). - AppendLeafCluster(pack.LeafAppClusterName()). - AppendApp(pack.LeafAppName()) + t.Parallel() + + t.Run("web app", func(t *testing.T) { + t.Parallel() + + profileName := mustGetProfileName(t, pack.RootWebAddr()) + appURI := uri.NewClusterURI(profileName). + AppendLeafCluster(pack.LeafAppClusterName()). + AppendApp(pack.LeafAppName()) + + testAppGatewayCertRenewal(ctx, t, pack, makeTCAndWebauthnLogin, appURI) + }) + + t.Run("TCP app", func(t *testing.T) { + t.Parallel() + + profileName := mustGetProfileName(t, pack.RootWebAddr()) + appURI := uri.NewClusterURI(profileName).AppendLeafCluster(pack.LeafAppClusterName()).AppendApp(pack.LeafTCPAppName()) - // The test can potentially hang forever if something is wrong with the MFA prompt, add a timeout. 
- ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + tc, webauthnLogin := makeTCAndWebauthnLogin(t) + + testGatewayCertRenewal( + ctx, + t, + gatewayCertRenewalParams{ + tc: tc, + createGatewayParams: daemon.CreateGatewayParams{TargetURI: appURI.String()}, + testGatewayConnection: makeMustConnectTCPAppGateway(pack.LeafTCPMessage()), + generateAndSetupUserCreds: pack.GenerateAndSetupUserCreds, + webauthnLogin: webauthnLogin, + }, + ) + }) + + t.Run("multi-port TCP app", func(t *testing.T) { + t.Parallel() + + profileName := mustGetProfileName(t, pack.RootWebAddr()) + appURI := uri.NewClusterURI(profileName).AppendLeafCluster(pack.LeafAppClusterName()).AppendApp(pack.LeafTCPMultiPortAppName()) + + tc, webauthnLogin := makeTCAndWebauthnLogin(t) + + testGatewayCertRenewal( + ctx, + t, + gatewayCertRenewalParams{ + tc: tc, + createGatewayParams: daemon.CreateGatewayParams{ + TargetURI: appURI.String(), + TargetSubresourceName: strconv.Itoa(pack.LeafTCPMultiPortAppPortAlpha()), + }, + testGatewayConnection: makeMustConnectMultiPortTCPAppGateway( + pack.LeafTCPMultiPortMessageAlpha(), pack.LeafTCPMultiPortAppPortBeta(), pack.LeafTCPMultiPortMessageBeta(), + ), + generateAndSetupUserCreds: pack.GenerateAndSetupUserCreds, + webauthnLogin: webauthnLogin, + // First MFA prompt is made when creating the gateway. Then makeMustConnectMultiPortTCPAppGateway + // changes the target port twice, which means two more prompts. + // + // Then testGatewayCertRenewal expires the certs and calls + // makeMustConnectMultiPortTCPAppGateway. The first connection refreshes the expired cert, + // then the function changes the target port twice again, resulting in two more prompts. + wantPromptMFACallCount: 3 + 3, + }, + ) + }) + }) +} + +func testTeletermAppGatewayTargetPortValidation(t *testing.T, pack *appaccess.Pack, makeTCAndWebauthnLogin makeTCAndWebauthnLoginFunc) { + t.Run("target port validation", func(t *testing.T) { + t.Parallel() + + tc, _ := makeTCAndWebauthnLogin(t) + err := tc.SaveProfile(false /* makeCurrent */) + require.NoError(t, err) + + storage, err := clusters.NewStorage(clusters.Config{ + Dir: tc.KeysDir, + InsecureSkipVerify: tc.InsecureSkipVerify, + HardwareKeyPromptConstructor: func(rootClusterURI uri.ResourceURI) keys.HardwareKeyPrompt { + return nil + }, + }) + require.NoError(t, err) + daemonService, err := daemon.New(daemon.Config{ + Storage: storage, + CreateTshdEventsClientCredsFunc: func() (grpc.DialOption, error) { + return grpc.WithTransportCredentials(insecure.NewCredentials()), nil + }, + CreateClientCacheFunc: func(newClient clientcache.NewClientFunc) (daemon.ClientCache, error) { + return clientcache.NewNoCache(newClient), nil + }, + KubeconfigsDir: t.TempDir(), + AgentsDir: t.TempDir(), + }) + require.NoError(t, err) + t.Cleanup(func() { + daemonService.Stop() + }) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) t.Cleanup(cancel) - testAppGatewayCertRenewal(ctx, t, pack, tc, appURI) + + // Here the test setup ends and actual test code starts. + profileName := mustGetProfileName(t, pack.RootWebAddr()) + appURI := uri.NewClusterURI(profileName).AppendApp(pack.RootTCPMultiPortAppName()) + + _, err = daemonService.CreateGateway(ctx, daemon.CreateGatewayParams{ + TargetURI: appURI.String(), + // 42 shouldn't be handed out to a non-root user when creating a listener on port 0, so it's + // unlikely that 42 is going to end up in the app spec. 
+ TargetSubresourceName: "42", + }) + require.True(t, trace.IsBadParameter(err), "Expected BadParameter, got %v", err) + require.ErrorContains(t, err, "not included in target ports") + + gateway, err := daemonService.CreateGateway(ctx, daemon.CreateGatewayParams{ + TargetURI: appURI.String(), + TargetSubresourceName: strconv.Itoa(pack.RootTCPMultiPortAppPortAlpha()), + }) + require.NoError(t, err) + + _, err = daemonService.SetGatewayTargetSubresourceName(ctx, gateway.URI().String(), "42") + require.True(t, trace.IsBadParameter(err), "Expected BadParameter, got %v", err) + require.ErrorContains(t, err, "not included in target ports") }) } -func testAppGatewayCertRenewal(ctx context.Context, t *testing.T, pack *appaccess.Pack, tc *libclient.TeleportClient, appURI uri.ResourceURI) { +func testAppGatewayCertRenewal(ctx context.Context, t *testing.T, pack *appaccess.Pack, makeTCAndWebauthnLogin makeTCAndWebauthnLoginFunc, appURI uri.ResourceURI) { t.Helper() + tc, webauthnLogin := makeTCAndWebauthnLogin(t) testGatewayCertRenewal( ctx, @@ -713,9 +895,9 @@ func testAppGatewayCertRenewal(ctx context.Context, t *testing.T, pack *appacces createGatewayParams: daemon.CreateGatewayParams{ TargetURI: appURI.String(), }, - testGatewayConnectionFunc: mustConnectAppGateway, + testGatewayConnection: mustConnectWebAppGateway, generateAndSetupUserCreds: pack.GenerateAndSetupUserCreds, - webauthnLogin: tc.WebauthnLogin, + webauthnLogin: webauthnLogin, }, ) } diff --git a/lib/teleterm/apiserver/handler/handler_gateways.go b/lib/teleterm/apiserver/handler/handler_gateways.go index 5a303e8e45c78..dbb0de52c9363 100644 --- a/lib/teleterm/apiserver/handler/handler_gateways.go +++ b/lib/teleterm/apiserver/handler/handler_gateways.go @@ -119,7 +119,7 @@ func makeGatewayCLICommand(cmds cmd.Cmds) *api.GatewayCLICommand { // // In Connect this is used to update the db name of a db connection along with the CLI command. 
func (s *Handler) SetGatewayTargetSubresourceName(ctx context.Context, req *api.SetGatewayTargetSubresourceNameRequest) (*api.Gateway, error) { - gateway, err := s.DaemonService.SetGatewayTargetSubresourceName(req.GatewayUri, req.TargetSubresourceName) + gateway, err := s.DaemonService.SetGatewayTargetSubresourceName(ctx, req.GatewayUri, req.TargetSubresourceName) if err != nil { return nil, trace.Wrap(err) } diff --git a/lib/teleterm/clusters/cluster_apps.go b/lib/teleterm/clusters/cluster_apps.go index 5b92788cb15b4..cfdecb8a62f66 100644 --- a/lib/teleterm/clusters/cluster_apps.go +++ b/lib/teleterm/clusters/cluster_apps.go @@ -25,6 +25,7 @@ import ( apiclient "github.com/gravitational/teleport/api/client" "github.com/gravitational/teleport/api/client/proto" + apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/auth/authclient" "github.com/gravitational/teleport/lib/client" @@ -55,11 +56,11 @@ type SAMLIdPServiceProvider struct { Provider types.SAMLIdPServiceProvider } -func (c *Cluster) getApp(ctx context.Context, authClient authclient.ClientI, appName string) (types.Application, error) { +func GetApp(ctx context.Context, authClient authclient.ClientI, appName string) (types.Application, error) { var app types.Application err := AddMetadataToRetryableError(ctx, func() error { apps, err := apiclient.GetAllResources[types.AppServer](ctx, authClient, &proto.ListResourcesRequest{ - Namespace: c.clusterClient.Namespace, + Namespace: apidefaults.Namespace, ResourceType: types.KindAppServer, PredicateExpression: fmt.Sprintf(`name == "%s"`, appName), }) @@ -143,3 +144,29 @@ func (c *Cluster) GetAWSRoles(app types.Application) aws.Roles { } return aws.Roles{} } + +// ValidateTargetPort parses rawTargetPort to uint32 and checks whether it's included in the app's TCP ports. +// It also returns an error if the app doesn't have any TCP ports defined. 
+func ValidateTargetPort(app types.Application, rawTargetPort string) (uint32, error) { + if rawTargetPort == "" { + return 0, nil + } + + targetPort, err := parseTargetPort(rawTargetPort) + if err != nil { + return 0, trace.Wrap(err) + } + + tcpPorts := app.GetTCPPorts() + if len(tcpPorts) == 0 { + return 0, trace.BadParameter("cannot specify target port %d because app %s does not provide access to multiple ports", + targetPort, app.GetName()) + } + + if !tcpPorts.Contains(int(targetPort)) { + return 0, trace.BadParameter("port %d is not included in target ports of app %s", + targetPort, app.GetName()) + } + + return targetPort, nil +} diff --git a/lib/teleterm/clusters/cluster_gateways.go b/lib/teleterm/clusters/cluster_gateways.go index 64577c35cf7dd..61c5fa7f38df4 100644 --- a/lib/teleterm/clusters/cluster_gateways.go +++ b/lib/teleterm/clusters/cluster_gateways.go @@ -21,6 +21,7 @@ package clusters import ( "context" "crypto/tls" + "strconv" "github.com/gravitational/trace" @@ -160,7 +161,7 @@ func (c *Cluster) createKubeGateway(ctx context.Context, params CreateGatewayPar func (c *Cluster) createAppGateway(ctx context.Context, params CreateGatewayParams) (gateway.Gateway, error) { appName := params.TargetURI.GetAppName() - app, err := c.getApp(ctx, params.ClusterClient.AuthClient, appName) + app, err := GetApp(ctx, params.ClusterClient.AuthClient, appName) if err != nil { return nil, trace.Wrap(err) } @@ -170,6 +171,13 @@ func (c *Cluster) createAppGateway(ctx context.Context, params CreateGatewayPara ClusterName: c.clusterClient.SiteName, URI: app.GetURI(), } + if params.TargetSubresourceName != "" { + targetPort, err := ValidateTargetPort(app, params.TargetSubresourceName) + if err != nil { + return nil, trace.Wrap(err) + } + routeToApp.TargetPort = targetPort + } var cert tls.Certificate if err := AddMetadataToRetryableError(ctx, func() error { @@ -182,6 +190,7 @@ func (c *Cluster) createAppGateway(ctx context.Context, params CreateGatewayPara gw, err := gateway.New(gateway.Config{ LocalPort: params.LocalPort, TargetURI: params.TargetURI, + TargetSubresourceName: params.TargetSubresourceName, TargetName: appName, Cert: cert, Protocol: app.GetProtocol(), @@ -195,6 +204,9 @@ func (c *Cluster) createAppGateway(ctx context.Context, params CreateGatewayPara RootClusterCACertPoolFunc: c.clusterClient.RootClusterCACertPool, ClusterName: c.Name, Username: c.status.Username, + // For multi-port TCP apps, the target port is stored in the target subresource name. Whenever + // that field is updated, the local proxy needs to generate a new cert which includes that port. 
+ ClearCertsOnTargetSubresourceNameChange: true, }) return gw, trace.Wrap(err) } @@ -214,7 +226,7 @@ func (c *Cluster) ReissueGatewayCerts(ctx context.Context, clusterClient *client return cert, trace.Wrap(err) case g.TargetURI().IsApp(): appName := g.TargetURI().GetAppName() - app, err := c.getApp(ctx, clusterClient.AuthClient, appName) + app, err := GetApp(ctx, clusterClient.AuthClient, appName) if err != nil { return tls.Certificate{}, trace.Wrap(err) } @@ -224,6 +236,13 @@ func (c *Cluster) ReissueGatewayCerts(ctx context.Context, clusterClient *client ClusterName: c.clusterClient.SiteName, URI: app.GetURI(), } + if g.TargetSubresourceName() != "" { + targetPort, err := parseTargetPort(g.TargetSubresourceName()) + if err != nil { + return tls.Certificate{}, trace.BadParameter(err.Error()) + } + routeToApp.TargetPort = targetPort + } // The cert is returned from this function and finally set on LocalProxy by the middleware. cert, err := c.ReissueAppCert(ctx, clusterClient, routeToApp) @@ -232,3 +251,11 @@ func (c *Cluster) ReissueGatewayCerts(ctx context.Context, clusterClient *client return tls.Certificate{}, trace.NotImplemented("ReissueGatewayCerts does not support this gateway kind %v", g.TargetURI().String()) } } + +func parseTargetPort(rawTargetPort string) (uint32, error) { + targetPort, err := strconv.ParseUint(rawTargetPort, 10, 32) + if err != nil { + return 0, trace.BadParameter(err.Error()) + } + return uint32(targetPort), nil +} diff --git a/lib/teleterm/daemon/daemon.go b/lib/teleterm/daemon/daemon.go index d3528793a4b99..b27ded1ba205c 100644 --- a/lib/teleterm/daemon/daemon.go +++ b/lib/teleterm/daemon/daemon.go @@ -511,7 +511,7 @@ func (s *Service) GetGatewayCLICommand(ctx context.Context, gateway gateway.Gate // SetGatewayTargetSubresourceName updates the TargetSubresourceName field of a gateway stored in // s.gateways. -func (s *Service) SetGatewayTargetSubresourceName(gatewayURI, targetSubresourceName string) (gateway.Gateway, error) { +func (s *Service) SetGatewayTargetSubresourceName(ctx context.Context, gatewayURI, targetSubresourceName string) (gateway.Gateway, error) { s.mu.Lock() defer s.mu.Unlock() @@ -520,6 +520,28 @@ func (s *Service) SetGatewayTargetSubresourceName(gatewayURI, targetSubresourceN return nil, trace.Wrap(err) } + targetURI := gateway.TargetURI() + switch { + case targetURI.IsApp(): + clusterClient, err := s.GetCachedClient(ctx, targetURI) + if err != nil { + return nil, trace.Wrap(err) + } + + var app types.Application + if err := clusters.AddMetadataToRetryableError(ctx, func() error { + var err error + app, err = clusters.GetApp(ctx, clusterClient.CurrentCluster(), targetURI.GetAppName()) + return trace.Wrap(err) + }); err != nil { + return nil, trace.Wrap(err) + } + + if _, err := clusters.ValidateTargetPort(app, targetSubresourceName); err != nil { + return nil, trace.Wrap(err) + } + } + gateway.SetTargetSubresourceName(targetSubresourceName) return gateway, nil diff --git a/lib/teleterm/gateway/app.go b/lib/teleterm/gateway/app.go index 110d36604aeff..57b2753269a5a 100644 --- a/lib/teleterm/gateway/app.go +++ b/lib/teleterm/gateway/app.go @@ -19,8 +19,6 @@ package gateway import ( "context" "crypto/tls" - "net/url" - "strings" "github.com/gravitational/trace" @@ -33,15 +31,6 @@ type app struct { *base } -// LocalProxyURL returns the URL of the local proxy. 
-func (a *app) LocalProxyURL() string { - proxyURL := url.URL{ - Scheme: strings.ToLower(a.Protocol()), - Host: a.LocalAddress() + ":" + a.LocalPort(), - } - return proxyURL.String() -} - func makeAppGateway(cfg Config) (Gateway, error) { base, err := newBase(cfg) if err != nil { diff --git a/lib/teleterm/gateway/app_middleware.go b/lib/teleterm/gateway/app_middleware.go index 9b58de8624016..8f47425142d80 100644 --- a/lib/teleterm/gateway/app_middleware.go +++ b/lib/teleterm/gateway/app_middleware.go @@ -43,12 +43,12 @@ func (m *appMiddleware) OnNewConnection(ctx context.Context, lp *alpn.LocalProxy return nil } - // Return early and don't fire onExpiredCert if certs are invalid but not due to expiry. - if !errors.As(err, &x509.CertificateInvalidError{}) { + // Return early and don't fire onExpiredCert if certs are invalid but not due to expiry or removal. + if !errors.As(err, &x509.CertificateInvalidError{}) && !trace.IsNotFound(err) { return trace.Wrap(err) } - m.logger.DebugContext(ctx, "Gateway certificates have expired", "error", err) + m.logger.DebugContext(ctx, "Gateway certificates have expired or been removed", "error", err) cert, err := m.onExpiredCert(ctx) if err != nil { diff --git a/lib/teleterm/gateway/base.go b/lib/teleterm/gateway/base.go index 3a8b076307c60..41d407ca0d8d7 100644 --- a/lib/teleterm/gateway/base.go +++ b/lib/teleterm/gateway/base.go @@ -20,10 +20,12 @@ package gateway import ( "context" + "crypto/tls" "fmt" "log/slog" "net" "strconv" + "sync" "github.com/gravitational/trace" @@ -89,6 +91,9 @@ func newBase(cfg Config) (*base, error) { // Close terminates gateway connection. Fails if called on an already closed gateway. func (b *base) Close() error { + b.mu.Lock() + defer b.mu.Unlock() + b.closeCancel() var errs []error @@ -158,17 +163,29 @@ func (b *base) TargetUser() string { } func (b *base) TargetSubresourceName() string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.cfg.TargetSubresourceName } func (b *base) SetTargetSubresourceName(value string) { + b.mu.Lock() + defer b.mu.Unlock() b.cfg.TargetSubresourceName = value + + if b.cfg.ClearCertsOnTargetSubresourceNameChange { + b.Log().InfoContext(b.closeContext, "Clearing cert") + b.localProxy.SetCert(tls.Certificate{}) + } } func (b *base) Log() *slog.Logger { return b.cfg.Logger } +// LocalAddress returns the local host in the net package terms (localhost or 127.0.0.1, depending +// on the platform). func (b *base) LocalAddress() string { return b.cfg.LocalAddress } @@ -187,15 +204,13 @@ func (b *base) LocalPortInt() int { } func (b *base) cloneConfig() Config { + b.mu.RLock() + defer b.mu.RUnlock() + return *b.cfg } -// Gateway describes local proxy that creates a gateway to the remote Teleport resource. -// -// Gateway is not safe for concurrent use in itself. However, all access to gateways is gated by -// daemon.Service which obtains a lock for any operation pertaining to gateways. -// -// In the future if Gateway becomes more complex it might be worthwhile to add an RWMutex to it. +// Gateway is a local proxy to a remote Teleport resource. type base struct { cfg *Config localProxy *alpn.LocalProxy @@ -206,6 +221,7 @@ type base struct { // that the local proxy is now closed and to release any resources. 
closeContext context.Context closeCancel context.CancelFunc + mu sync.RWMutex } type TCPPortAllocator interface { diff --git a/lib/teleterm/gateway/config.go b/lib/teleterm/gateway/config.go index 67768d05900db..a4877d20d8394 100644 --- a/lib/teleterm/gateway/config.go +++ b/lib/teleterm/gateway/config.go @@ -91,6 +91,11 @@ type Config struct { RootClusterCACertPoolFunc alpnproxy.GetClusterCACertPoolFunc // KubeconfigsDir is the directory containing kubeconfigs for kube gateways. KubeconfigsDir string + // ClearCertsOnTargetSubresourceNameChange is useful in situations where TargetSubresourceName is + // used to generate a cert. In that case, after TargetSubresourceName is changed, the gateway will + // clear the cert from the local proxy and the middleware is going to request a new cert on the + // next connection. + ClearCertsOnTargetSubresourceNameChange bool } // OnExpiredCertFunc is the type of a function that is called when a new downstream connection is diff --git a/lib/teleterm/gateway/interfaces.go b/lib/teleterm/gateway/interfaces.go index 27bc6735a2b9d..9d102d788f041 100644 --- a/lib/teleterm/gateway/interfaces.go +++ b/lib/teleterm/gateway/interfaces.go @@ -43,6 +43,8 @@ type Gateway interface { TargetSubresourceName() string SetTargetSubresourceName(value string) Log() *slog.Logger + // LocalAddress returns the local host in the net package terms (localhost or 127.0.0.1, depending + // on the platform). LocalAddress() string LocalPort() string LocalPortInt() int @@ -95,7 +97,4 @@ type Kube interface { // App defines an app gateway. type App interface { Gateway - - // LocalProxyURL returns the URL of the local proxy. - LocalProxyURL() string } diff --git a/lib/teleterm/gateway/kube.go b/lib/teleterm/gateway/kube.go index 1dccb4189accc..d39f925bd75bf 100644 --- a/lib/teleterm/gateway/kube.go +++ b/lib/teleterm/gateway/kube.go @@ -187,6 +187,8 @@ func (k *kube) makeForwardProxyForKube() error { } func (k *kube) writeKubeconfig(key *keys.PrivateKey, cas map[string]tls.Certificate) error { + k.base.mu.RLock() + defer k.base.mu.RUnlock() ca, ok := cas[k.cfg.ClusterName] if !ok { return trace.BadParameter("CA for teleport cluster %q is missing", k.cfg.ClusterName) diff --git a/proto/teleport/lib/teleterm/v1/gateway.proto b/proto/teleport/lib/teleterm/v1/gateway.proto index 7661a6bf31f4a..4399fcc307e26 100644 --- a/proto/teleport/lib/teleterm/v1/gateway.proto +++ b/proto/teleport/lib/teleterm/v1/gateway.proto @@ -43,12 +43,13 @@ message Gateway { string local_address = 5; // local_port is the gateway address on localhost string local_port = 6; - // protocol is the gateway protocol + // protocol is the protocol used by the gateway. For databases, it matches the type of the + // database that the gateway targets. For apps, it's either "HTTP" or "TCP". string protocol = 7; reserved 8; reserved "cli_command"; // target_subresource_name points at a subresource of the remote resource, for example a - // database name on a database server. + // database name on a database server or a target port of a multi-port TCP app. string target_subresource_name = 9; // gateway_cli_client represents a command that the user can execute to connect to the resource // through the gateway. 
diff --git a/web/packages/design/src/Input/Input.tsx b/web/packages/design/src/Input/Input.tsx index 3cf50e9d1009b..fe3b7feca968c 100644 --- a/web/packages/design/src/Input/Input.tsx +++ b/web/packages/design/src/Input/Input.tsx @@ -70,6 +70,7 @@ interface InputProps extends ColorProps, SpaceProps, WidthProps, HeightProps { inputMode?: InputMode; spellCheck?: boolean; style?: React.CSSProperties; + required?: boolean; 'aria-invalid'?: HTMLAttributes<'input'>['aria-invalid']; 'aria-describedby'?: HTMLAttributes<'input'>['aria-describedby']; @@ -170,6 +171,7 @@ const Input = forwardRef((props, ref) => { inputMode, spellCheck, style, + required, 'aria-invalid': ariaInvalid, 'aria-describedby': ariaDescribedBy, @@ -222,6 +224,7 @@ const Input = forwardRef((props, ref) => { inputMode, spellCheck, style, + required, 'aria-invalid': ariaInvalid, 'aria-describedby': ariaDescribedBy, diff --git a/web/packages/design/src/Menu/Menu.story.tsx b/web/packages/design/src/Menu/Menu.story.tsx index c7b0726ea414b..c3ba4ae802762 100644 --- a/web/packages/design/src/Menu/Menu.story.tsx +++ b/web/packages/design/src/Menu/Menu.story.tsx @@ -107,6 +107,18 @@ export const MenuItems = () => ( Amet nisi tempor + +

Label as first child

+ + Tempus ut libero + Lorem ipsum + Dolor sit amet + + Leo vitae arcu + Donec volutpat + Mauris sit + +
); diff --git a/web/packages/design/src/Menu/MenuItem.tsx b/web/packages/design/src/Menu/MenuItem.tsx index 5ccbae227c835..a9b06373bd787 100644 --- a/web/packages/design/src/Menu/MenuItem.tsx +++ b/web/packages/design/src/Menu/MenuItem.tsx @@ -71,37 +71,39 @@ const MenuItemBase = styled(Flex)` ${fromThemeBase} `; -export const MenuItemSectionLabel = styled(MenuItemBase).attrs({ - px: 2, +export const MenuItemSectionSeparator = styled.hr.attrs({ onClick: event => { // Make sure that clicks on this element don't trigger onClick set on MenuList. event.stopPropagation(); }, })` - font-weight: bold; - min-height: 16px; + background: ${props => props.theme.colors.interactive.tonal.neutral[1]}; + height: 1px; + border: 0; + font-size: 0; `; -export const MenuItemSectionSeparator = styled.hr.attrs({ +export const MenuItemSectionLabel = styled(MenuItemBase).attrs({ + px: 2, onClick: event => { // Make sure that clicks on this element don't trigger onClick set on MenuList. event.stopPropagation(); }, })` - background: ${props => props.theme.colors.interactive.tonal.neutral[1]}; - height: 1px; - border: 0; - font-size: 0; + font-weight: bold; + min-height: 16px; - // Add padding to the label for extra visual space, but only when it follows a separator. - // If a separator follows a MenuItem, there's already enough visual space, so no extra space is - // needed. The hover state of MenuItem highlights everything right from the separator start to the - // end of MenuItem. + // Add padding to the label for extra visual space, but only when it follows a separator or is the + // first child. + // + // If a separator follows a MenuItem, there's already enough visual space between MenuItem and + // separator, so no extra space is needed. The hover state of MenuItem highlights everything right + // from the separator start to the end of MenuItem. // // Padding is used instead of margin here on purpose, so that there's no empty transparent space // between Separator and Label – otherwise clicking on that space would count as a click on // MenuList and not trigger onClick set on Separator or Label. 
- & + ${MenuItemSectionLabel} { + ${MenuItemSectionSeparator} + &, &:first-child { padding-top: ${props => props.theme.space[1]}px; } `; diff --git a/web/packages/design/src/keyframes.ts b/web/packages/design/src/keyframes.ts index c49799db9f67f..a3a7bf96f7245 100644 --- a/web/packages/design/src/keyframes.ts +++ b/web/packages/design/src/keyframes.ts @@ -46,3 +46,7 @@ export const blink = keyframes` opacity: 100%; } `; + +export const disappear = keyframes` +to { opacity: 0; } +`; diff --git a/web/packages/shared/components/FieldInput/FieldInput.tsx b/web/packages/shared/components/FieldInput/FieldInput.tsx index 2ac28f54e810c..2f3a3eb012550 100644 --- a/web/packages/shared/components/FieldInput/FieldInput.tsx +++ b/web/packages/shared/components/FieldInput/FieldInput.tsx @@ -59,6 +59,7 @@ const FieldInput = forwardRef( toolTipContent = null, disabled = false, markAsError = false, + required = false, ...styles }, ref @@ -94,6 +95,7 @@ const FieldInput = forwardRef( size={size} aria-invalid={hasError || markAsError} aria-describedby={helperTextId} + required={required} /> ); @@ -219,7 +221,7 @@ export type FieldInputProps = BoxProps & { id?: string; name?: string; value?: string; - label?: string; + label?: React.ReactNode; helperText?: React.ReactNode; icon?: React.ComponentType; size?: InputSize; @@ -245,4 +247,5 @@ export type FieldInputProps = BoxProps & { // input box as error color before validator // runs (which marks it as error) markAsError?: boolean; + required?: boolean; }; diff --git a/web/packages/teleterm/src/services/tshd/testHelpers.ts b/web/packages/teleterm/src/services/tshd/testHelpers.ts index b19fc95725192..8cb15ec3e3701 100644 --- a/web/packages/teleterm/src/services/tshd/testHelpers.ts +++ b/web/packages/teleterm/src/services/tshd/testHelpers.ts @@ -290,7 +290,7 @@ export const makeAppGateway = ( targetUri: appUri, localAddress: 'localhost', localPort: '1337', - targetSubresourceName: 'bar', + targetSubresourceName: undefined, gatewayCliCommand: { path: '', preview: 'curl http://localhost:1337', diff --git a/web/packages/teleterm/src/ui/DocumentCluster/ActionButtons.tsx b/web/packages/teleterm/src/ui/DocumentCluster/ActionButtons.tsx index c16dd1d5fa779..39147660e843e 100644 --- a/web/packages/teleterm/src/ui/DocumentCluster/ActionButtons.tsx +++ b/web/packages/teleterm/src/ui/DocumentCluster/ActionButtons.tsx @@ -23,7 +23,7 @@ import { MenuItemSectionLabel, MenuItemSectionSeparator, } from 'design/Menu/MenuItem'; -import { App } from 'gen-proto-ts/teleport/lib/teleterm/v1/app_pb'; +import { App, PortRange } from 'gen-proto-ts/teleport/lib/teleterm/v1/app_pb'; import { Cluster } from 'gen-proto-ts/teleport/lib/teleterm/v1/cluster_pb'; import { Database } from 'gen-proto-ts/teleport/lib/teleterm/v1/database_pb'; import { Kube } from 'gen-proto-ts/teleport/lib/teleterm/v1/kube_pb'; @@ -125,8 +125,11 @@ export function ConnectAppActionButton(props: { app: App }): React.JSX.Element { connectToAppWithVnet(appContext, launchVnet, props.app, targetPort); } - function setUpGateway(): void { - setUpAppGateway(appContext, props.app, { origin: 'resource_table' }); + function setUpGateway(targetPort?: number): void { + setUpAppGateway(appContext, props.app, { + telemetry: { origin: 'resource_table' }, + targetPort, + }); } const rootCluster = appContext.clustersService.findCluster( @@ -229,7 +232,7 @@ function AppButton(props: { cluster: Cluster; rootCluster: Cluster; connectWithVnet(targetPort?: number): void; - setUpGateway(): void; + setUpGateway(targetPort?: number): void; 
onLaunchUrl(): void; isVnetSupported: boolean; }) { @@ -285,37 +288,15 @@ function AppButton(props: { target="_blank" title="Launch the app in the browser" > - Set up connection + props.setUpGateway()}> + Set up connection + ); } // TCP app with VNet. if (props.isVnetSupported) { - let $targetPorts: JSX.Element; - if (props.app.tcpPorts.length) { - $targetPorts = ( - <> - - Available target ports - {props.app.tcpPorts.map((portRange, index) => ( - props.connectWithVnet(portRange.port)} - > - {formatPortRange(portRange)} - - ))} - - ); - } - return ( props.connectWithVnet()} > - Connect without VNet - {$targetPorts} + props.setUpGateway()}> + Connect without VNet + + {!!props.app.tcpPorts.length && ( + <> + + props.connectWithVnet(port)} + /> + + )} + + ); + } + + // Multi-port TCP app without VNet. + if (props.app.tcpPorts.length) { + return ( + props.setUpGateway()} + > + props.setUpGateway(port)} + /> ); } - // TCP app without VNet. + // Single-port TCP app without VNet. return ( props.setUpGateway()} textTransform="none" > Connect @@ -341,6 +349,29 @@ function AppButton(props: { ); } +const AvailableTargetPorts = (props: { + tcpPorts: PortRange[]; + onItemClick: (portRangePort: number) => void; +}) => ( + <> + Available target ports + {props.tcpPorts.map((portRange, index) => ( + props.onItemClick(portRange.port)} + > + {formatPortRange(portRange)} + + ))} + +); + export function AccessRequestButton(props: { isResourceAdded: boolean; requestStarted: boolean; diff --git a/web/packages/teleterm/src/ui/DocumentGateway/useGateway.ts b/web/packages/teleterm/src/ui/DocumentGateway/useGateway.ts index 1c08bae058742..743667ebfa662 100644 --- a/web/packages/teleterm/src/ui/DocumentGateway/useGateway.ts +++ b/web/packages/teleterm/src/ui/DocumentGateway/useGateway.ts @@ -30,6 +30,7 @@ import { retryWithRelogin } from 'teleterm/ui/utils'; export function useGateway(doc: DocumentGateway) { const ctx = useAppContext(); + const { clustersService } = ctx; const { documentsService } = useWorkspaceContext(); // The port to show as default in the input field in case creating a gateway fails. 
// This is typically the case if someone reopens the app and the port of the gateway is already @@ -51,7 +52,7 @@ export function useGateway(doc: DocumentGateway) { try { gw = await retryWithRelogin(ctx, doc.targetUri, () => - ctx.clustersService.createGateway({ + clustersService.createGateway({ targetUri: doc.targetUri, localPort: port, targetUser: doc.targetUser, @@ -92,34 +93,52 @@ export function useGateway(doc: DocumentGateway) { }); const [disconnectAttempt, disconnect] = useAsync(async () => { - await ctx.clustersService.removeGateway(doc.gatewayUri); + await clustersService.removeGateway(doc.gatewayUri); documentsService.close(doc.uri); }); const [changeTargetSubresourceNameAttempt, changeTargetSubresourceName] = - useAsync(async (name: string) => { - const updatedGateway = - await ctx.clustersService.setGatewayTargetSubresourceName( - doc.gatewayUri, - name - ); + useAsync( + useCallback( + (name: string) => + retryWithRelogin(ctx, doc.targetUri, async () => { + const updatedGateway = + await clustersService.setGatewayTargetSubresourceName( + doc.gatewayUri, + name + ); - documentsService.update(doc.uri, { - targetSubresourceName: updatedGateway.targetSubresourceName, - }); - }); - - const [changePortAttempt, changePort] = useAsync(async (port: string) => { - const updatedGateway = await ctx.clustersService.setGatewayLocalPort( - doc.gatewayUri, - port + documentsService.update(doc.uri, { + targetSubresourceName: updatedGateway.targetSubresourceName, + }); + }), + [ + clustersService, + documentsService, + doc.uri, + doc.gatewayUri, + ctx, + doc.targetUri, + ] + ) ); - documentsService.update(doc.uri, { - targetSubresourceName: updatedGateway.targetSubresourceName, - port: updatedGateway.localPort, - }); - }); + const [changePortAttempt, changePort] = useAsync( + useCallback( + async (port: string) => { + const updatedGateway = await clustersService.setGatewayLocalPort( + doc.gatewayUri, + port + ); + + documentsService.update(doc.uri, { + targetSubresourceName: updatedGateway.targetSubresourceName, + port: updatedGateway.localPort, + }); + }, + [clustersService, documentsService, doc.uri, doc.gatewayUri] + ) + ); useEffect( function createGatewayOnMount() { diff --git a/web/packages/teleterm/src/ui/DocumentGatewayApp/AppGateway.tsx b/web/packages/teleterm/src/ui/DocumentGatewayApp/AppGateway.tsx index 1c2981ce9f42b..bd31e84d80035 100644 --- a/web/packages/teleterm/src/ui/DocumentGatewayApp/AppGateway.tsx +++ b/web/packages/teleterm/src/ui/DocumentGatewayApp/AppGateway.tsx @@ -16,18 +16,27 @@ * along with this program. If not, see . 
*/ -import { useMemo, useRef } from 'react'; +import { + ChangeEvent, + ChangeEventHandler, + PropsWithChildren, + useEffect, + useMemo, + useState, +} from 'react'; +import styled from 'styled-components'; import { Alert, - Box, ButtonSecondary, + disappear, Flex, H1, - Indicator, Link, + rotate360, Text, } from 'design'; +import { Check, Spinner } from 'design/Icon'; import { Gateway } from 'gen-proto-ts/teleport/lib/teleterm/v1/gateway_pb'; import { TextSelectCopy } from 'shared/components/TextSelectCopy'; import Validation from 'shared/components/Validation'; @@ -39,68 +48,110 @@ import { PortFieldInput } from '../components/FieldInputs'; export function AppGateway(props: { gateway: Gateway; disconnectAttempt: Attempt; - changePort(port: string): void; - changePortAttempt: Attempt; + changeLocalPort(port: string): void; + changeLocalPortAttempt: Attempt; + changeTargetPort(port: string): void; + changeTargetPortAttempt: Attempt; disconnect(): void; }) { const { gateway } = props; - const formRef = useRef(); - const { changePort } = props; - const handleChangePort = useMemo(() => { - return debounce((value: string) => { - if (formRef.current.reportValidity()) { - changePort(value); - } - }, 1000); - }, [changePort]); + const { + changeLocalPort, + changeLocalPortAttempt, + changeTargetPort, + changeTargetPortAttempt, + disconnectAttempt, + } = props; + // It must be possible to update local port while target port is invalid, hence why + // useDebouncedPortChangeHandler checks the validity of only one input at a time. Otherwise the UI + // would lose updates to the local port while the target port was invalid. + const handleLocalPortChange = useDebouncedPortChangeHandler(changeLocalPort); + const handleTargetPortChange = + useDebouncedPortChangeHandler(changeTargetPort); let address = `${gateway.localAddress}:${gateway.localPort}`; if (gateway.protocol === 'HTTP') { address = `http://${address}`; } + // AppGateway doesn't have access to the app resource itself, so it has to decide whether the + // app is multi-port or not in some other way. + // For multi-port apps, DocumentGateway comes with targetSubresourceName prefilled to the first + // port number found in TCP ports. Single-port apps have this field empty. + // So, if targetSubresourceName is present, then the app must be multi-port. In this case, the + // user is free to change it and can never provide an empty targetSubresourceName. + // When the app is not multi-port, targetSubresourceName is empty and the user cannot change it. + const isMultiPort = + gateway.protocol === 'TCP' && gateway.targetSubresourceName; + return ( - - + +

App Connection

Close Connection
- {props.disconnectAttempt.status === 'error' && ( - + {disconnectAttempt.status === 'error' && ( + Could not close the connection )} - + + } defaultValue={gateway.localPort} - onChange={e => handleChangePort(e.target.value)} - mb={2} + onChange={handleLocalPortChange} + mb={0} /> + {isMultiPort && ( + + } + required + defaultValue={gateway.targetSubresourceName} + onChange={handleTargetPortChange} + mb={0} + /> + )} - {props.changePortAttempt.status === 'processing' && ( - - )} - Access the app at: - +
+ Access the app at: + +
- {props.changePortAttempt.status === 'error' && ( - - Could not change the port number + {changeLocalPortAttempt.status === 'error' && ( + + Could not change the local port + + )} + + {changeTargetPortAttempt.status === 'error' && ( + + Could not change the target port )} @@ -115,6 +166,89 @@ export function AppGateway(props: { {' '} for more details. -
+ ); } + +const LabelWithAttemptStatus = (props: { + text: string; + attempt: Attempt; +}) => ( + + {props.text} + {props.attempt.status === 'processing' && ( + + )} + {props.attempt.status === 'success' && ( + // CSS animations are repeated whenever the parent goes from `display: none` to something + // else. As a result, we need to unmount the animated check so that the animation is not + // repeated when the user switches to this tab. + // https://www.w3.org/TR/css-animations-1/#example-4e34d7ba + + + + )} + +); + +/** + * useDebouncedPortChangeHandler returns a debounced change handler that calls the change function + * only if the input from which the event originated is valid. + */ +const useDebouncedPortChangeHandler = ( + changeFunc: (port: string) => void +): ChangeEventHandler => + useMemo( + () => + debounce((event: ChangeEvent) => { + if (event.target.reportValidity()) { + changeFunc(event.target.value); + } + }, 1000), + [changeFunc] + ); + +const AnimatedSpinner = styled(Spinner)` + animation: ${rotate360} 1.5s infinite linear; + // The spinner needs to be positioned absolutely so that the fact that it's spinning + // doesn't affect the size of the parent. + position: absolute; + right: 0; + top: 0; +`; + +const disappearanceDelayMs = 1000; +const disappearanceDurationMs = 200; + +const DisappearingCheck = styled(Check)` + opacity: 1; + animation: ${disappear}; + animation-delay: ${disappearanceDelayMs}ms; + animation-duration: ${disappearanceDurationMs}ms; + animation-fill-mode: forwards; +`; + +const UnmountAfter = ({ + timeoutMs, + children, +}: PropsWithChildren<{ timeoutMs: number }>) => { + const [isMounted, setIsMounted] = useState(true); + + useEffect(() => { + const timeout = setTimeout(() => { + setIsMounted(false); + }, timeoutMs); + + return () => { + clearTimeout(timeout); + }; + }, [timeoutMs]); + + return isMounted ? 
children : null; +}; diff --git a/web/packages/teleterm/src/ui/DocumentGatewayApp/DocumentGatewayApp.story.tsx b/web/packages/teleterm/src/ui/DocumentGatewayApp/DocumentGatewayApp.story.tsx index c0b4ec802b28e..936f1c8a399b1 100644 --- a/web/packages/teleterm/src/ui/DocumentGatewayApp/DocumentGatewayApp.story.tsx +++ b/web/packages/teleterm/src/ui/DocumentGatewayApp/DocumentGatewayApp.story.tsx @@ -30,9 +30,10 @@ import { MockWorkspaceContextProvider } from 'teleterm/ui/fixtures/MockWorkspace import * as types from 'teleterm/ui/services/workspacesService'; type StoryProps = { - appType: 'web' | 'tcp'; + appType: 'web' | 'tcp' | 'tcp-multi-port'; online: boolean; - changePort: 'succeed' | 'throw-error'; + changeLocalPort: 'succeed' | 'throw-error'; + changeTargetPort: 'succeed' | 'throw-error'; disconnect: 'succeed' | 'throw-error'; }; @@ -42,9 +43,14 @@ const meta: Meta = { argTypes: { appType: { control: { type: 'radio' }, - options: ['web', 'tcp'], + options: ['web', 'tcp', 'tcp-multi-port'], }, - changePort: { + changeLocalPort: { + if: { arg: 'online' }, + control: { type: 'radio' }, + options: ['succeed', 'throw-error'], + }, + changeTargetPort: { if: { arg: 'online' }, control: { type: 'radio' }, options: ['succeed', 'throw-error'], @@ -58,7 +64,8 @@ const meta: Meta = { args: { appType: 'web', online: true, - changePort: 'succeed', + changeLocalPort: 'succeed', + changeTargetPort: 'succeed', disconnect: 'succeed', }, }; @@ -70,6 +77,10 @@ export function Story(props: StoryProps) { if (props.appType === 'tcp') { gateway.protocol = 'TCP'; } + if (props.appType === 'tcp-multi-port') { + gateway.protocol = 'TCP'; + gateway.targetSubresourceName = '4242'; + } const documentGateway: types.DocumentGateway = { kind: 'doc.gateway', targetUri: '/clusters/bar/apps/quux', @@ -80,10 +91,14 @@ export function Story(props: StoryProps) { targetUser: '', status: '', targetName: 'quux', + targetSubresourceName: undefined, }; if (!props.online) { documentGateway.gatewayUri = undefined; } + if (props.appType === 'tcp-multi-port') { + documentGateway.targetSubresourceName = '4242'; + } const appContext = new MockAppContext(); appContext.workspacesService.setState(draftState => { @@ -105,8 +120,26 @@ export function Story(props: StoryProps) { wait(1000).then( () => new MockedUnaryCall( - { ...gateway, localPort }, - props.changePort === 'throw-error' + { + ...appContext.clustersService.findGateway(gateway.uri), + localPort, + }, + props.changeLocalPort === 'throw-error' + ? new Error('something went wrong') + : undefined + ) + ); + appContext.tshd.setGatewayTargetSubresourceName = ({ + targetSubresourceName, + }) => + wait(1000).then( + () => + new MockedUnaryCall( + { + ...appContext.clustersService.findGateway(gateway.uri), + targetSubresourceName, + }, + props.changeTargetPort === 'throw-error' ? 
new Error('something went wrong') : undefined ) diff --git a/web/packages/teleterm/src/ui/DocumentGatewayApp/DocumentGatewayApp.tsx b/web/packages/teleterm/src/ui/DocumentGatewayApp/DocumentGatewayApp.tsx index ba70a7dfbdbe3..24db9f673be64 100644 --- a/web/packages/teleterm/src/ui/DocumentGatewayApp/DocumentGatewayApp.tsx +++ b/web/packages/teleterm/src/ui/DocumentGatewayApp/DocumentGatewayApp.tsx @@ -29,13 +29,15 @@ export function DocumentGatewayApp(props: { const { doc } = props; const { gateway, - changePort, - changePortAttempt, + changePort: changeLocalPort, + changePortAttempt: changeLocalPortAttempt, connected, connectAttempt, disconnect, disconnectAttempt, reconnect, + changeTargetSubresourceName: changeTargetPort, + changeTargetSubresourceNameAttempt: changeTargetPortAttempt, } = useGateway(doc); return ( @@ -47,14 +49,17 @@ export function DocumentGatewayApp(props: { targetName={doc.targetName} gatewayPort={{ isSupported: true, defaultPort: doc.port }} reconnect={reconnect} + portFieldLabel="Local Port (optional)" /> ) : ( )} diff --git a/web/packages/teleterm/src/ui/TabHost/useTabShortcuts.test.tsx b/web/packages/teleterm/src/ui/TabHost/useTabShortcuts.test.tsx index ce65290c2eb1f..b8fe467178b54 100644 --- a/web/packages/teleterm/src/ui/TabHost/useTabShortcuts.test.tsx +++ b/web/packages/teleterm/src/ui/TabHost/useTabShortcuts.test.tsx @@ -55,6 +55,7 @@ function getMockDocuments(): Document[] { targetUri: '/clusters/bar/dbs/foobar', targetName: 'foobar', targetUser: 'foo', + targetSubresourceName: undefined, origin: 'resource_table', status: '', }, @@ -66,6 +67,7 @@ function getMockDocuments(): Document[] { targetUri: '/clusters/bar/dbs/foobar', targetName: 'foobar', targetUser: 'bar', + targetSubresourceName: undefined, origin: 'resource_table', status: '', }, diff --git a/web/packages/teleterm/src/ui/components/FieldInputs.tsx b/web/packages/teleterm/src/ui/components/FieldInputs.tsx index 21086d8f9bb23..7e7d57e4ec40f 100644 --- a/web/packages/teleterm/src/ui/components/FieldInputs.tsx +++ b/web/packages/teleterm/src/ui/components/FieldInputs.tsx @@ -16,23 +16,26 @@ * along with this program. If not, see . */ -import { forwardRef } from 'react'; +import styled from 'styled-components'; -import FieldInput, { FieldInputProps } from 'shared/components/FieldInput'; +import FieldInput from 'shared/components/FieldInput'; -export const ConfigFieldInput = forwardRef( - (props, ref) => -); +export const ConfigFieldInput = styled(FieldInput).attrs({ size: 'small' })` + input { + &:invalid, + &:invalid:hover { + border-color: ${props => + props.theme.colors.interactive.solid.danger.default}; + } + } +`; -export const PortFieldInput = forwardRef( - (props, ref) => ( - - ) -); +export const PortFieldInput = styled(ConfigFieldInput).attrs({ + type: 'number', + min: 1, + max: 65535, + // Without a min width, the stepper controls end up being to close to a long port number such + // as 65535. minWidth instead of width allows the field to grow with the label, so that e.g. + // a custom label of "Local Port (optional)" is displayed on a single line. 
+ minWidth: '110px', +})``; diff --git a/web/packages/teleterm/src/ui/components/OfflineGateway.tsx b/web/packages/teleterm/src/ui/components/OfflineGateway.tsx index 500a85951ba9a..2dbc1027565ee 100644 --- a/web/packages/teleterm/src/ui/components/OfflineGateway.tsx +++ b/web/packages/teleterm/src/ui/components/OfflineGateway.tsx @@ -36,7 +36,9 @@ export function OfflineGateway(props: { targetName: string; /** Gateway kind displayed in the UI, for example, 'database'. */ gatewayKind: string; + portFieldLabel?: string; }) { + const portFieldLabel = props.portFieldLabel || 'Port (optional)'; const defaultPort = props.gatewayPort.isSupported ? props.gatewayPort.defaultPort : undefined; @@ -88,7 +90,7 @@ export function OfflineGateway(props: { {props.gatewayPort.isSupported && ( { describe('setUpAppGateway', () => { test.each([ { - name: 'creates tunnel for a tcp app', + name: 'creates tunnel for a single-port TCP app', app: makeApp({ endpointUri: 'tcp://localhost:3000', }), }, + { + name: 'creates tunnel for a multi-port TCP app', + app: makeApp({ + endpointUri: 'tcp://localhost', + tcpPorts: [{ port: 1234, endPort: 0 }], + }), + expectedTargetSubresourceName: '1234', + }, + { + name: 'creates tunnel for a multi-port TCP app with a preselected target port', + app: makeApp({ + endpointUri: 'tcp://localhost', + tcpPorts: [{ port: 1234, endPort: 0 }], + }), + targetPort: 1234, + }, { name: 'creates tunnel for a web app', app: makeApp({ endpointUri: 'http://localhost:3000', }), }, - ])('$name', async ({ app }) => { + ])('$name', async ({ app, targetPort, expectedTargetSubresourceName }) => { const appContext = new MockAppContext(); setTestCluster(appContext); - await setUpAppGateway(appContext, app, { origin: 'resource_table' }); + await setUpAppGateway(appContext, app, { + telemetry: { origin: 'resource_table' }, + targetPort, + }); const documents = appContext.workspacesService .getActiveWorkspaceDocumentService() .getGatewayDocuments(); @@ -147,7 +166,8 @@ describe('setUpAppGateway', () => { port: undefined, status: '', targetName: 'foo', - targetSubresourceName: undefined, + targetSubresourceName: + expectedTargetSubresourceName || targetPort?.toString() || undefined, targetUri: '/clusters/teleport-local/apps/foo', targetUser: '', title: 'foo', diff --git a/web/packages/teleterm/src/ui/services/workspacesService/documentsService/connectToApp.ts b/web/packages/teleterm/src/ui/services/workspacesService/documentsService/connectToApp.ts index 93aee047a7341..2711bae403b0d 100644 --- a/web/packages/teleterm/src/ui/services/workspacesService/documentsService/connectToApp.ts +++ b/web/packages/teleterm/src/ui/services/workspacesService/documentsService/connectToApp.ts @@ -115,13 +115,21 @@ export async function connectToApp( return; } - await setUpAppGateway(ctx, target, telemetry); + await setUpAppGateway(ctx, target, { telemetry }); } export async function setUpAppGateway( ctx: IAppContext, target: App, - telemetry: { origin: DocumentOrigin } + options: { + telemetry: { origin: DocumentOrigin }; + /** + * targetPort allows the caller to preselect the target port for the gateway. Works only with + * multi-port TCP apps. If it's not specified and the app is multi-port, the first port from + * it's TCP ports is used instead. 
+ */ + targetPort?: number; + } ) { const rootClusterUri = routing.ensureRootClusterUri(target.uri); @@ -129,16 +137,20 @@ export async function setUpAppGateway( ctx.workspacesService.getWorkspaceDocumentService(rootClusterUri); const doc = documentsService.createGatewayDocument({ targetUri: target.uri, - origin: telemetry.origin, + origin: options.telemetry.origin, targetName: routing.parseAppUri(target.uri).params.appId, targetUser: '', + targetSubresourceName: + target.tcpPorts.length > 0 + ? (options.targetPort || target.tcpPorts[0].port).toString() + : undefined, }); const connectionToReuse = ctx.connectionTracker.findConnectionByDocument(doc); if (connectionToReuse) { await ctx.connectionTracker.activateItem(connectionToReuse.id, { - origin: telemetry.origin, + origin: options.telemetry.origin, }); } else { await ctx.workspacesService.setActiveWorkspace(rootClusterUri); diff --git a/web/packages/teleterm/src/ui/services/workspacesService/documentsService/documentsService.test.ts b/web/packages/teleterm/src/ui/services/workspacesService/documentsService/documentsService.test.ts index b50989a4273ff..96d1f3129ea24 100644 --- a/web/packages/teleterm/src/ui/services/workspacesService/documentsService/documentsService.test.ts +++ b/web/packages/teleterm/src/ui/services/workspacesService/documentsService/documentsService.test.ts @@ -79,6 +79,7 @@ describe('document should be added', () => { targetUri: '/clusters/bar/dbs/quux', targetName: 'quux', targetUser: 'foo', + targetSubresourceName: undefined, origin: 'resource_table', status: '', }; @@ -155,6 +156,7 @@ test('only gateway documents should be returned', () => { targetUri: '/clusters/bar/dbs/quux', targetName: 'quux', targetUser: 'foo', + targetSubresourceName: undefined, origin: 'resource_table', status: '', }; diff --git a/web/packages/teleterm/src/ui/services/workspacesService/documentsService/types.ts b/web/packages/teleterm/src/ui/services/workspacesService/documentsService/types.ts index 970fb09d22ba7..e975d8e268ae7 100644 --- a/web/packages/teleterm/src/ui/services/workspacesService/documentsService/types.ts +++ b/web/packages/teleterm/src/ui/services/workspacesService/documentsService/types.ts @@ -109,7 +109,11 @@ export interface DocumentGateway extends DocumentBase { targetUri: uri.DatabaseUri | uri.AppUri; targetUser: string; targetName: string; - targetSubresourceName?: string; + /** + * targetSubresourceName contains database name for db gateways and target port for TCP app + * gateways. 
+ */ + targetSubresourceName: string | undefined; port?: string; origin: DocumentOrigin; } From 852fc7d6053459d1a54089c4c1daa724163caba4 Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Mon, 13 Jan 2025 11:58:27 -0500 Subject: [PATCH 05/15] Remove unused servicecfg.Config.Console field (#50944) Depends on https://github.com/gravitational/teleport.e/pull/5829 --- e | 2 +- e2e/aws/fixtures_test.go | 1 - integration/appaccess/fixtures.go | 2 -- integration/appaccess/pack.go | 2 -- integration/helpers/instance.go | 4 +--- integration/hostuser_test.go | 4 ++-- integration/integration_test.go | 23 +++++++++-------------- integration/kube_integration_test.go | 2 -- integration/port_forwarding_test.go | 1 - integration/proxy/proxy_helpers.go | 1 - lib/client/api_login_test.go | 4 ---- lib/config/configuration.go | 6 ------ lib/service/servicecfg/config.go | 9 --------- tool/teleport/common/teleport_test.go | 1 - 14 files changed, 13 insertions(+), 49 deletions(-) diff --git a/e b/e index 498f643ea9033..65fa473e50c72 160000 --- a/e +++ b/e @@ -1 +1 @@ -Subproject commit 498f643ea9033b1235359d83c310caadb18305d2 +Subproject commit 65fa473e50c72d8f79261033a1298cc2955ca15c diff --git a/e2e/aws/fixtures_test.go b/e2e/aws/fixtures_test.go index 1b30f64f382a5..95373466c237e 100644 --- a/e2e/aws/fixtures_test.go +++ b/e2e/aws/fixtures_test.go @@ -198,7 +198,6 @@ func newTeleportConfig(t *testing.T) *servicecfg.Config { tconf := servicecfg.MakeDefaultConfig() // Replace the default auth and proxy listeners with the ones so we can // run multiple tests in parallel. - tconf.Console = nil tconf.Proxy.DisableWebInterface = true tconf.PollingPeriod = 500 * time.Millisecond tconf.Testing.ClientTimeout = time.Second diff --git a/integration/appaccess/fixtures.go b/integration/appaccess/fixtures.go index eee6390c471f5..e6876e7cbdaec 100644 --- a/integration/appaccess/fixtures.go +++ b/integration/appaccess/fixtures.go @@ -346,7 +346,6 @@ func SetupWithOptions(t *testing.T, opts AppTestOptions) *Pack { p.leafCluster = helpers.NewInstance(t, leafCfg) rcConf := servicecfg.MakeDefaultConfig() - rcConf.Console = nil rcConf.Logger = log rcConf.DataDir = t.TempDir() rcConf.Auth.Enabled = true @@ -364,7 +363,6 @@ func SetupWithOptions(t *testing.T, opts AppTestOptions) *Pack { rcConf.Clock = opts.Clock lcConf := servicecfg.MakeDefaultConfig() - lcConf.Console = nil lcConf.Logger = log lcConf.DataDir = t.TempDir() lcConf.Auth.Enabled = true diff --git a/integration/appaccess/pack.go b/integration/appaccess/pack.go index 5a5de08691da4..24eb1e9a5dde2 100644 --- a/integration/appaccess/pack.go +++ b/integration/appaccess/pack.go @@ -759,7 +759,6 @@ func (p *Pack) startRootAppServers(t *testing.T, count int, opts AppTestOptions) for i := 0; i < count; i++ { raConf := servicecfg.MakeDefaultConfig() raConf.Clock = opts.Clock - raConf.Console = nil raConf.Logger = utils.NewSlogLoggerForTests() raConf.DataDir = t.TempDir() raConf.SetToken("static-token-value") @@ -929,7 +928,6 @@ func (p *Pack) startLeafAppServers(t *testing.T, count int, opts AppTestOptions) for i := 0; i < count; i++ { laConf := servicecfg.MakeDefaultConfig() laConf.Clock = opts.Clock - laConf.Console = nil laConf.Logger = utils.NewSlogLoggerForTests() laConf.DataDir = t.TempDir() laConf.SetToken("static-token-value") diff --git a/integration/helpers/instance.go b/integration/helpers/instance.go index 6d375387a02f6..7e7deb03567a8 100644 --- a/integration/helpers/instance.go +++ b/integration/helpers/instance.go 
@@ -447,10 +447,9 @@ func (i *TeleInstance) GetSiteAPI(siteName string) authclient.ClientI { // Create creates a new instance of Teleport which trusts a list of other clusters (other // instances) -func (i *TeleInstance) Create(t *testing.T, trustedSecrets []*InstanceSecrets, enableSSH bool, console io.Writer) error { +func (i *TeleInstance) Create(t *testing.T, trustedSecrets []*InstanceSecrets, enableSSH bool) error { tconf := servicecfg.MakeDefaultConfig() tconf.SSH.Enabled = enableSSH - tconf.Console = console tconf.Logger = i.Log tconf.Proxy.DisableWebService = true tconf.Proxy.DisableWebInterface = true @@ -1129,7 +1128,6 @@ func (i *TeleInstance) StartProxy(cfg ProxyConfig, opts ...Option) (reversetunne i.tempDirs = append(i.tempDirs, dataDir) tconf := servicecfg.MakeDefaultConfig() - tconf.Console = nil tconf.Logger = i.Log authServer := utils.MustParseAddr(i.Auth) tconf.SetAuthServerAddress(*authServer) diff --git a/integration/hostuser_test.go b/integration/hostuser_test.go index 02145bc38274e..540ae35a59c48 100644 --- a/integration/hostuser_test.go +++ b/integration/hostuser_test.go @@ -661,7 +661,7 @@ func TestRootLoginAsHostUser(t *testing.T) { Roles: []types.Role{role}, } - require.NoError(t, instance.Create(t, nil, true, nil)) + require.NoError(t, instance.Create(t, nil, true)) require.NoError(t, instance.Start()) t.Cleanup(func() { require.NoError(t, instance.StopAll()) @@ -740,7 +740,7 @@ func TestRootStaticHostUsers(t *testing.T) { Logger: utils.NewSlogLoggerForTests(), }) - require.NoError(t, instance.Create(t, nil, false, nil)) + require.NoError(t, instance.Create(t, nil, false)) require.NoError(t, instance.Start()) t.Cleanup(func() { require.NoError(t, instance.StopAll()) diff --git a/integration/integration_test.go b/integration/integration_test.go index 0b48c90b46f39..4e2c4bed4974e 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -711,7 +711,7 @@ func (s *integrationTestSuite) newUnstartedTeleport(t *testing.T, logins []strin for _, login := range logins { teleport.AddUser(login, []string{login}) } - require.NoError(t, teleport.Create(t, nil, enableSSH, nil)) + require.NoError(t, teleport.Create(t, nil, enableSSH)) return teleport } @@ -2564,9 +2564,9 @@ func testTwoClustersProxy(t *testing.T, suite *integrationTestSuite) { a.AddUser(username, []string{username}) b.AddUser(username, []string{username}) - require.NoError(t, b.Create(t, a.Secrets.AsSlice(), false, nil)) + require.NoError(t, b.Create(t, a.Secrets.AsSlice(), false)) defer b.StopAll() - require.NoError(t, a.Create(t, b.Secrets.AsSlice(), true, nil)) + require.NoError(t, a.Create(t, b.Secrets.AsSlice(), true)) defer a.StopAll() require.NoError(t, b.Start()) @@ -2602,8 +2602,8 @@ func testHA(t *testing.T, suite *integrationTestSuite) { a.AddUser(username, []string{username}) b.AddUser(username, []string{username}) - require.NoError(t, b.Create(t, a.Secrets.AsSlice(), true, nil)) - require.NoError(t, a.Create(t, b.Secrets.AsSlice(), true, nil)) + require.NoError(t, b.Create(t, a.Secrets.AsSlice(), true)) + require.NoError(t, a.Create(t, b.Secrets.AsSlice(), true)) require.NoError(t, b.Start()) require.NoError(t, a.Start()) @@ -3950,13 +3950,13 @@ func testDiscoveryRecovers(t *testing.T, suite *integrationTestSuite) { remote.AddUser(username, []string{username}) main.AddUser(username, []string{username}) - require.NoError(t, main.Create(t, remote.Secrets.AsSlice(), false, nil)) + require.NoError(t, main.Create(t, remote.Secrets.AsSlice(), false)) mainSecrets := 
main.Secrets // switch listen address of the main cluster to load balancer mainProxyAddr := *utils.MustParseAddr(mainSecrets.TunnelAddr) lb.AddBackend(mainProxyAddr) mainSecrets.TunnelAddr = lb.Addr().String() - require.NoError(t, remote.Create(t, mainSecrets.AsSlice(), true, nil)) + require.NoError(t, remote.Create(t, mainSecrets.AsSlice(), true)) require.NoError(t, main.Start()) require.NoError(t, remote.Start()) @@ -4085,13 +4085,13 @@ func testDiscovery(t *testing.T, suite *integrationTestSuite) { remote.AddUser(username, []string{username}) main.AddUser(username, []string{username}) - require.NoError(t, main.Create(t, remote.Secrets.AsSlice(), false, nil)) + require.NoError(t, main.Create(t, remote.Secrets.AsSlice(), false)) mainSecrets := main.Secrets // switch listen address of the main cluster to load balancer mainProxyAddr := *utils.MustParseAddr(mainSecrets.TunnelAddr) lb.AddBackend(mainProxyAddr) mainSecrets.TunnelAddr = lb.Addr().String() - require.NoError(t, remote.Create(t, mainSecrets.AsSlice(), true, nil)) + require.NoError(t, remote.Create(t, mainSecrets.AsSlice(), true)) require.NoError(t, main.Start()) require.NoError(t, remote.Start()) @@ -7223,7 +7223,6 @@ func WithListeners(setupFn helpers.InstanceListenerSetupFunc) InstanceConfigOpti func (s *integrationTestSuite) defaultServiceConfig() *servicecfg.Config { cfg := servicecfg.MakeDefaultConfig() - cfg.Console = nil cfg.Logger = s.Log cfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() cfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() @@ -8572,7 +8571,6 @@ func TestConnectivityWithoutAuth(t *testing.T) { // Create auth config. authCfg := servicecfg.MakeDefaultConfig() - authCfg.Console = nil authCfg.Logger = utils.NewSlogLoggerForTests() authCfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() authCfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() @@ -8635,7 +8633,6 @@ func TestConnectivityWithoutAuth(t *testing.T) { nodeCfg.SetToken("token") nodeCfg.CachePolicy.Enabled = true nodeCfg.DataDir = t.TempDir() - nodeCfg.Console = nil nodeCfg.Logger = utils.NewSlogLoggerForTests() nodeCfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() nodeCfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() @@ -8716,7 +8713,6 @@ func TestConnectivityDuringAuthRestart(t *testing.T) { // Create auth config. 
authCfg := servicecfg.MakeDefaultConfig() - authCfg.Console = nil authCfg.Logger = utils.NewSlogLoggerForTests() authCfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() authCfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() @@ -8776,7 +8772,6 @@ func TestConnectivityDuringAuthRestart(t *testing.T) { nodeCfg.SetToken("token") nodeCfg.CachePolicy.Enabled = true nodeCfg.DataDir = t.TempDir() - nodeCfg.Console = nil nodeCfg.Logger = utils.NewSlogLoggerForTests() nodeCfg.CircuitBreakerConfig = breaker.NoopBreakerConfig() nodeCfg.InstanceMetadataClient = imds.NewDisabledIMDSClient() diff --git a/integration/kube_integration_test.go b/integration/kube_integration_test.go index 264bbfdf50706..51568d0e6bc7e 100644 --- a/integration/kube_integration_test.go +++ b/integration/kube_integration_test.go @@ -1833,7 +1833,6 @@ type sessionMetadataResponse struct { // teleKubeConfig sets up teleport with kubernetes turned on func (s *KubeSuite) teleKubeConfig(hostname string) *servicecfg.Config { tconf := servicecfg.MakeDefaultConfig() - tconf.Console = nil tconf.Logger = s.log tconf.SSH.Enabled = true tconf.Proxy.DisableWebInterface = true @@ -1854,7 +1853,6 @@ func (s *KubeSuite) teleKubeConfig(hostname string) *servicecfg.Config { // teleKubeConfig sets up teleport with kubernetes turned on func (s *KubeSuite) teleAuthConfig(hostname string) *servicecfg.Config { tconf := servicecfg.MakeDefaultConfig() - tconf.Console = nil tconf.Logger = s.log tconf.PollingPeriod = 500 * time.Millisecond tconf.Testing.ClientTimeout = time.Second diff --git a/integration/port_forwarding_test.go b/integration/port_forwarding_test.go index 88af150695872..cdef9e9b6f35a 100644 --- a/integration/port_forwarding_test.go +++ b/integration/port_forwarding_test.go @@ -205,7 +205,6 @@ func testPortForwarding(t *testing.T, suite *integrationTestSuite) { nodeCfg.SetToken("token") nodeCfg.CachePolicy.Enabled = true nodeCfg.DataDir = t.TempDir() - nodeCfg.Console = nil nodeCfg.Auth.Enabled = false nodeCfg.Proxy.Enabled = false nodeCfg.SSH.Enabled = true diff --git a/integration/proxy/proxy_helpers.go b/integration/proxy/proxy_helpers.go index b5796110eb53d..e4e0823c2cefd 100644 --- a/integration/proxy/proxy_helpers.go +++ b/integration/proxy/proxy_helpers.go @@ -196,7 +196,6 @@ func newSuite(t *testing.T, opts ...proxySuiteOptionsFunc) *Suite { func (p *Suite) addNodeToLeafCluster(t *testing.T, tunnelNodeHostname string) { nodeConfig := func() *servicecfg.Config { tconf := servicecfg.MakeDefaultConfig() - tconf.Console = nil tconf.Logger = utils.NewSlogLoggerForTests() tconf.Hostname = tunnelNodeHostname tconf.SetToken("token") diff --git a/lib/client/api_login_test.go b/lib/client/api_login_test.go index e06e73c6ce648..15d09fc03b671 100644 --- a/lib/client/api_login_test.go +++ b/lib/client/api_login_test.go @@ -516,8 +516,6 @@ type standaloneBundle struct { func newStandaloneTeleport(t *testing.T, clock clockwork.Clock) *standaloneBundle { randomAddr := utils.NetAddr{AddrNetwork: "tcp", Addr: "127.0.0.1:0"} - console := io.Discard - staticToken := uuid.New().String() // Prepare role and user. 
@@ -549,7 +547,6 @@ func newStandaloneTeleport(t *testing.T, clock clockwork.Clock) *standaloneBundl cfg.DataDir = makeDataDir() cfg.Hostname = "localhost" cfg.Clock = clock - cfg.Console = console cfg.Logger = utils.NewSlogLoggerForTests() cfg.SetAuthServerAddress(randomAddr) // must be present cfg.Auth.Preference, err = types.NewAuthPreferenceFromConfigFile(types.AuthPreferenceSpecV2{ @@ -633,7 +630,6 @@ func newStandaloneTeleport(t *testing.T, clock clockwork.Clock) *standaloneBundl cfg.Hostname = "localhost" cfg.SetToken(staticToken) cfg.Clock = clock - cfg.Console = console cfg.Logger = utils.NewSlogLoggerForTests() cfg.SetAuthServerAddress(*authAddr) cfg.Auth.Enabled = false diff --git a/lib/config/configuration.go b/lib/config/configuration.go index dda2ac6859cf4..45d3544012cfe 100644 --- a/lib/config/configuration.go +++ b/lib/config/configuration.go @@ -781,11 +781,6 @@ func applyAuthOrProxyAddress(fc *FileConfig, cfg *servicecfg.Config) error { } func applyLogConfig(loggerConfig Log, cfg *servicecfg.Config) error { - switch loggerConfig.Output { - case "stderr", "error", "2", "stdout", "out", "1": - cfg.Console = io.Discard // disable console printing - } - logger, level, err := logutils.Initialize(logutils.Config{ Output: loggerConfig.Output, Severity: loggerConfig.Severity, @@ -2514,7 +2509,6 @@ func Configure(clf *CommandLineFlags, cfg *servicecfg.Config, legacyAppFlags boo // apply --debug flag to config: if clf.Debug { - cfg.Console = io.Discard cfg.Debug = clf.Debug } diff --git a/lib/service/servicecfg/config.go b/lib/service/servicecfg/config.go index a89e29a2c7b54..a7841b4d10db4 100644 --- a/lib/service/servicecfg/config.go +++ b/lib/service/servicecfg/config.go @@ -21,7 +21,6 @@ package servicecfg import ( "context" - "io" "log/slog" "net" "net/http" @@ -133,9 +132,6 @@ type Config struct { // a teleport cluster). It's automatically generated on 1st start HostUUID string - // Console writer to speak to a user - Console io.Writer - // ReverseTunnels is a list of reverse tunnels to create on the // first cluster start ReverseTunnels []types.ReverseTunnel @@ -551,7 +547,6 @@ func ApplyDefaults(cfg *Config) { // Global defaults. cfg.Hostname = hostname cfg.DataDir = defaults.DataDir - cfg.Console = os.Stdout cfg.CipherSuites = utils.DefaultCipherSuites() cfg.Ciphers = sc.Ciphers cfg.KEXAlgorithms = kex @@ -695,10 +690,6 @@ func applyDefaults(cfg *Config) { cfg.Version = defaults.TeleportConfigVersionV1 } - if cfg.Console == nil { - cfg.Console = io.Discard - } - if cfg.Logger == nil { cfg.Logger = slog.Default() } diff --git a/tool/teleport/common/teleport_test.go b/tool/teleport/common/teleport_test.go index 7b1292f1e625c..fbf449fe37bc1 100644 --- a/tool/teleport/common/teleport_test.go +++ b/tool/teleport/common/teleport_test.go @@ -84,7 +84,6 @@ func TestTeleportMain(t *testing.T) { require.True(t, conf.Auth.Enabled) require.True(t, conf.SSH.Enabled) require.True(t, conf.Proxy.Enabled) - require.Equal(t, os.Stdout, conf.Console) require.True(t, slog.Default().Handler().Enabled(context.Background(), slog.LevelError)) }) From 9948ed4e2557820076b0d305f465f8f06d788941 Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Mon, 13 Jan 2025 12:03:07 -0500 Subject: [PATCH 06/15] Use locally scoped slog loggers instead of default (#50950) Cleans up some todos left during the logrus to slog conversion process. 
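For illustration (editor's note, not part of this commit): a minimal sketch of the pattern the change applies, assuming a hypothetical Server type that carries a component-scoped *slog.Logger. Deriving child loggers from that field, rather than from the package-level slog default, keeps the configured handler and fields such as "component" attached to watcher log lines.

// Minimal sketch; the Server/startWatcher names are assumptions for
// illustration only and are not code from this patch.
package main

import (
	"context"
	"log/slog"
	"os"
)

type Server struct {
	// Log is the component-scoped logger configured at startup.
	Log *slog.Logger
}

func (s *Server) startWatcher(ctx context.Context, kind string) {
	// Previously: slog.With("kind", kind) attached fields to the process-wide
	// default logger. Scoped: derive from s.Log so handler settings and fields
	// like "component" carry over to the watcher's log entries.
	logger := s.Log.With("kind", kind)
	logger.InfoContext(ctx, "starting watcher")
}

func main() {
	root := slog.New(slog.NewTextHandler(os.Stderr, nil)).With("component", "discovery")
	srv := &Server{Log: root}
	srv.startWatcher(context.Background(), "db")
}

The concrete field names in the diffs below (cfg.Logger, srv.logger, s.Log) vary per component; the sketch only shows the shared idea of passing the scoped logger through instead of relying on slog's default.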
--- lib/reversetunnel/srv.go | 17 +++++++---------- lib/srv/discovery/database_watcher.go | 4 +--- lib/srv/discovery/discovery.go | 5 ++--- lib/srv/discovery/kube_services_watcher.go | 4 +--- 4 files changed, 11 insertions(+), 19 deletions(-) diff --git a/lib/reversetunnel/srv.go b/lib/reversetunnel/srv.go index eb7483eec6477..e83efccf31166 100644 --- a/lib/reversetunnel/srv.go +++ b/lib/reversetunnel/srv.go @@ -303,8 +303,7 @@ func NewServer(cfg Config) (reversetunnelclient.Server, error) { ResourceWatcherConfig: services.ResourceWatcherConfig{ Component: cfg.Component, Client: cfg.LocalAccessPoint, - // TODO(tross): update this after converting to slog here - // Logger: cfg.Log, + Logger: cfg.Logger, }, ProxiesC: make(chan []types.Server, 10), ProxyGetter: cfg.LocalAccessPoint, @@ -1211,10 +1210,9 @@ func newRemoteSite(srv *server, domainName string, sconn ssh.Conn) (*remoteSite, remoteSite.remoteAccessPoint = accessPoint nodeWatcher, err := services.NewNodeWatcher(closeContext, services.NodeWatcherConfig{ ResourceWatcherConfig: services.ResourceWatcherConfig{ - Component: srv.Component, - Client: accessPoint, - // TODO(tross) update this after converting to use slog - // Logger: srv.Log, + Component: srv.Component, + Client: accessPoint, + Logger: srv.Logger, MaxStaleness: time.Minute, }, NodesGetter: accessPoint, @@ -1247,10 +1245,9 @@ func newRemoteSite(srv *server, domainName string, sconn ssh.Conn) (*remoteSite, remoteWatcher, err := services.NewCertAuthorityWatcher(srv.ctx, services.CertAuthorityWatcherConfig{ ResourceWatcherConfig: services.ResourceWatcherConfig{ Component: teleport.ComponentProxy, - // TODO(tross): update this after converting to slog - // Logger: srv.log, - Clock: srv.Clock, - Client: remoteSite.remoteAccessPoint, + Logger: srv.logger, + Clock: srv.Clock, + Client: remoteSite.remoteAccessPoint, }, Types: []types.CertAuthType{types.HostCA}, }) diff --git a/lib/srv/discovery/database_watcher.go b/lib/srv/discovery/database_watcher.go index b14332a8f9bb4..297b9a7dfd8cd 100644 --- a/lib/srv/discovery/database_watcher.go +++ b/lib/srv/discovery/database_watcher.go @@ -20,7 +20,6 @@ package discovery import ( "context" - "log/slog" "sync" "github.com/gravitational/trace" @@ -54,8 +53,7 @@ func (s *Server) startDatabaseWatchers() error { defer mu.Unlock() return utils.FromSlice(newDatabases, types.Database.GetName) }, - // TODO(tross): update to use the server logger once it is converted to use slog - Logger: slog.With("kind", types.KindDatabase), + Logger: s.Log.With("kind", types.KindDatabase), OnCreate: s.onDatabaseCreate, OnUpdate: s.onDatabaseUpdate, OnDelete: s.onDatabaseDelete, diff --git a/lib/srv/discovery/discovery.go b/lib/srv/discovery/discovery.go index 047553edeabde..f27f60a112e2b 100644 --- a/lib/srv/discovery/discovery.go +++ b/lib/srv/discovery/discovery.go @@ -1908,9 +1908,8 @@ func (s *Server) getAzureSubscriptions(ctx context.Context, subs []string) ([]st func (s *Server) initTeleportNodeWatcher() (err error) { s.nodeWatcher, err = services.NewNodeWatcher(s.ctx, services.NodeWatcherConfig{ ResourceWatcherConfig: services.ResourceWatcherConfig{ - Component: teleport.ComponentDiscovery, - // TODO(tross): update this after converting logging to use slog - // Logger: s.Logger, + Component: teleport.ComponentDiscovery, + Logger: s.Log, Client: s.AccessPoint, MaxStaleness: time.Minute, }, diff --git a/lib/srv/discovery/kube_services_watcher.go b/lib/srv/discovery/kube_services_watcher.go index 8e63c6947242a..9940734248588 100644 --- 
a/lib/srv/discovery/kube_services_watcher.go +++ b/lib/srv/discovery/kube_services_watcher.go @@ -20,7 +20,6 @@ package discovery import ( "context" - "log/slog" "sync" "time" @@ -62,8 +61,7 @@ func (s *Server) startKubeAppsWatchers() error { defer mu.Unlock() return utils.FromSlice(appResources, types.Application.GetName) }, - // TODO(tross): update to use the server logger once it is converted to use slog - Logger: slog.With("kind", types.KindApp), + Logger: s.Log.With("kind", types.KindApp), OnCreate: s.onAppCreate, OnUpdate: s.onAppUpdate, OnDelete: s.onAppDelete, From 646329da7db47dd83472ebc7bc0e36e8f67479e5 Mon Sep 17 00:00:00 2001 From: Bernard Kim Date: Mon, 13 Jan 2025 09:21:09 -0800 Subject: [PATCH 07/15] operator: Support trusted_cluster resources (#49920) * Add UpsertTrustedClusterV2 rpc This supersedes UpsertTrustedCluster rpc. V2 performs resource name validation. * Replace confusing UpsertValidationTrustedCluster name * Use UpsertTrustedClusterV2 in tests * Address feedback - Remove unnecessary ping - Update error messages - Use skipNameValidation consts - Validate cluster name before establishing trust - Do not reveal cluster name in error message - Use BadParameter instead of CompareFailed * Use webclient.Find * Fix test/lint * Allow label updates * Fix test * Update CRDs 1. Run `make manifests`. 2. Run `make -C crdgen update-protos`. 3. Run `make -C crdgen update-snapshot`. * Implement trusted_cluster CRD * Update docs * Support secret lookup * Update secret lookup docs * Fix error handling * Use V2 * Implement CreateTrustedClusterV2 and UpdateTrustedClusterV2 * Address feedback * Minor fixes * Use Create/Update instead of Upsert * Update crdgen * Update trusted_cluster tests * Move V2 RPCs to the trust service * crdgen * Remove V2 suffix * 2024 -> 2025 * Use slog --- .../teleport-operator/secret-lookup.mdx | 5 +- .../teleport-operator/teleport-operator.mdx | 19 +- ...sources.teleport.dev_trustedclustersv2.mdx | 41 ++ ...ources.teleport.dev_trustedclustersv2.yaml | 149 +++++++ .../teleport-operator/templates/role.yaml | 2 + .../templates/auth/config.yaml | 8 + integrations/operator/README.md | 4 + .../resources/v1/trusted_cluster_types.go | 96 +++++ .../resources/v1/zz_generated.deepcopy.go | 69 ++++ ...ources.teleport.dev_trustedclustersv2.yaml | 149 +++++++ .../legacy_resource_without_labels.go | 2 +- .../operator/controllers/resources/setup.go | 1 + .../controllers/resources/testlib/env.go | 1 + .../resources/trusted_cluster_controller.go | 91 +++++ .../trusted_clusterv2_controller_test.go | 356 +++++++++++++++++ .../operator/crdgen/additional_doc.go | 3 + integrations/operator/crdgen/handlerequest.go | 1 + integrations/operator/crdgen/ignored.go | 3 + ...ces.teleport.dev_openssheiceserversv2.yaml | 14 + ...sources.teleport.dev_opensshserversv2.yaml | 14 + .../golden/resources.teleport.dev_roles.yaml | 92 +++++ .../resources.teleport.dev_rolesv6.yaml | 46 +++ .../resources.teleport.dev_rolesv7.yaml | 46 +++ ...ources.teleport.dev_trustedclustersv2.yaml | 149 +++++++ .../golden/resources.teleport.dev_users.yaml | 12 + .../legacy/client/proto/authservice.proto | 124 +++++- .../teleport/legacy/client/proto/event.proto | 5 + .../teleport/legacy/types/events/events.proto | 378 ++++++++++++++++++ .../types/trusted_device_requirement.proto | 37 ++ .../teleport/legacy/types/types.proto | 181 ++++++++- .../operator/hack/fixture-operator-role.yaml | 8 + lib/auth/trustedcluster.go | 1 - 32 files changed, 2074 insertions(+), 33 deletions(-) create mode 100644 
docs/pages/reference/operator-resources/resources.teleport.dev_trustedclustersv2.mdx create mode 100644 examples/chart/teleport-cluster/charts/teleport-operator/operator-crds/resources.teleport.dev_trustedclustersv2.yaml create mode 100644 integrations/operator/apis/resources/v1/trusted_cluster_types.go create mode 100644 integrations/operator/config/crd/bases/resources.teleport.dev_trustedclustersv2.yaml create mode 100644 integrations/operator/controllers/resources/trusted_cluster_controller.go create mode 100644 integrations/operator/controllers/resources/trusted_clusterv2_controller_test.go create mode 100644 integrations/operator/crdgen/testdata/golden/resources.teleport.dev_trustedclustersv2.yaml create mode 100644 integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/trusted_device_requirement.proto diff --git a/docs/pages/admin-guides/infrastructure-as-code/teleport-operator/secret-lookup.mdx b/docs/pages/admin-guides/infrastructure-as-code/teleport-operator/secret-lookup.mdx index a23e4935c5051..ca8f2b20e5b60 100644 --- a/docs/pages/admin-guides/infrastructure-as-code/teleport-operator/secret-lookup.mdx +++ b/docs/pages/admin-guides/infrastructure-as-code/teleport-operator/secret-lookup.mdx @@ -11,7 +11,10 @@ of the Teleport Kubernetes operator CRs. Some Teleport resources might contain sensitive values. Select CR fields can reference an existing Kubernetes secret and the operator will retrieve the value from the secret when reconciling. -Currently only the GithubConnector and OIDCConnector `client_secret` field support secret lookup. +Currently supported fields for secret lookup: +- GithubConnector `client_secret` +- OIDCConnector `client_secret` +- TrustedClusterV2 `token` ## Prerequisites diff --git a/docs/pages/admin-guides/infrastructure-as-code/teleport-operator/teleport-operator.mdx b/docs/pages/admin-guides/infrastructure-as-code/teleport-operator/teleport-operator.mdx index e8dec4b877a13..890421acf2742 100644 --- a/docs/pages/admin-guides/infrastructure-as-code/teleport-operator/teleport-operator.mdx +++ b/docs/pages/admin-guides/infrastructure-as-code/teleport-operator/teleport-operator.mdx @@ -26,16 +26,21 @@ could cause instability and non-deterministic behaviour. 
Currently supported Teleport resources are: -- users (`TeleportUser`) -- roles +- Users (`TeleportUser`) +- Roles - `TeleportRole` creates role v5 - `TeleportRoleV6` creates role v6 - `TeleportRoleV7` creates role v7 -- OIDC connectors (`TeleportOIDCConnector`) -- SAML connectors (`TeleportSAMLConnector`) -- GitHub connectors (`TeleportGithubConnector`) -- provision tokens (`TeleportProvisionToken`) -- Login Rules (`TeleportLoginRules`) +- OIDC Connectors (`TeleportOIDCConnector`) +- SAML Connectors (`TeleportSAMLConnector`) +- GitHub Connectors (`TeleportGithubConnector`) +- Provision Tokens (`TeleportProvisionToken`) +- Login Rules (`TeleportLoginRule`) +- Access Lists (`TeleportAccessList`) +- Okta Import Rules (`TeleportOktaImportRule`) +- OpenSSHEICE Servers (`TeleportOpenSSHEICEServerV2`) +- OpenSSH Servers (`TeleportOpenSSHServerV2`) +- Trusted Clusters (`TeleportTrustedClusterV2`) ### Setting up the operator diff --git a/docs/pages/reference/operator-resources/resources.teleport.dev_trustedclustersv2.mdx b/docs/pages/reference/operator-resources/resources.teleport.dev_trustedclustersv2.mdx new file mode 100644 index 0000000000000..8728b51b2ab5c --- /dev/null +++ b/docs/pages/reference/operator-resources/resources.teleport.dev_trustedclustersv2.mdx @@ -0,0 +1,41 @@ +--- +title: TeleportTrustedClusterV2 +description: Provides a comprehensive list of fields in the TeleportTrustedClusterV2 resource available through the Teleport Kubernetes operator +tocDepth: 3 +--- + +{/*Auto-generated file. Do not edit.*/} +{/*To regenerate, navigate to integrations/operator and run "make crd-docs".*/} + +This guide is a comprehensive reference to the fields in the `TeleportTrustedClusterV2` +resource, which you can apply after installing the Teleport Kubernetes operator. + + +## resources.teleport.dev/v1 + +**apiVersion:** resources.teleport.dev/v1 + +|Field|Type|Description| +|---|---|---| +|apiVersion|string|APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources| +|kind|string|Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| +|metadata|object|| +|spec|[object](#spec)|TrustedCluster resource definition v2 from Teleport| + +### spec + +|Field|Type|Description| +|---|---|---| +|enabled|boolean|Enabled is a bool that indicates if the TrustedCluster is enabled or disabled. Setting Enabled to false has a side effect of deleting the user and host certificate authority (CA).| +|role_map|[][object](#specrole_map-items)|RoleMap specifies role mappings to remote roles.| +|token|string|Token is the authorization token provided by another cluster needed by this cluster to join. This field supports secret lookup. See the operator documentation for more details.| +|tunnel_addr|string|ReverseTunnelAddress is the address of the SSH proxy server of the cluster to join. If not set, it is derived from `:`.| +|web_proxy_addr|string|ProxyAddress is the address of the web proxy server of the cluster to join. 
If not set, it is derived from `:`.| + +### spec.role_map items + +|Field|Type|Description| +|---|---|---| +|local|[]string|Local specifies local roles to map to| +|remote|string|Remote specifies remote role name to map from| + diff --git a/examples/chart/teleport-cluster/charts/teleport-operator/operator-crds/resources.teleport.dev_trustedclustersv2.yaml b/examples/chart/teleport-cluster/charts/teleport-operator/operator-crds/resources.teleport.dev_trustedclustersv2.yaml new file mode 100644 index 0000000000000..4cf1410472b64 --- /dev/null +++ b/examples/chart/teleport-cluster/charts/teleport-operator/operator-crds/resources.teleport.dev_trustedclustersv2.yaml @@ -0,0 +1,149 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: teleporttrustedclustersv2.resources.teleport.dev +spec: + group: resources.teleport.dev + names: + kind: TeleportTrustedClusterV2 + listKind: TeleportTrustedClusterV2List + plural: teleporttrustedclustersv2 + shortNames: + - trustedclusterv2 + - trustedclustersv2 + singular: teleporttrustedclusterv2 + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: TrustedClusterV2 is the Schema for the trustedclustersv2 API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TrustedCluster resource definition v2 from Teleport + properties: + enabled: + description: Enabled is a bool that indicates if the TrustedCluster + is enabled or disabled. Setting Enabled to false has a side effect + of deleting the user and host certificate authority (CA). + type: boolean + role_map: + description: RoleMap specifies role mappings to remote roles. + items: + properties: + local: + description: Local specifies local roles to map to + items: + type: string + nullable: true + type: array + remote: + description: Remote specifies remote role name to map from + type: string + type: object + type: array + token: + description: Token is the authorization token provided by another + cluster needed by this cluster to join. This field supports secret + lookup. See the operator documentation for more details. + type: string + tunnel_addr: + description: ReverseTunnelAddress is the address of the SSH proxy + server of the cluster to join. If not set, it is derived from `:`. + type: string + web_proxy_addr: + description: ProxyAddress is the address of the web proxy server of + the cluster to join. If not set, it is derived from `:`. + type: string + type: object + status: + description: Status defines the observed state of the Teleport resource + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/examples/chart/teleport-cluster/charts/teleport-operator/templates/role.yaml b/examples/chart/teleport-cluster/charts/teleport-operator/templates/role.yaml index 25b8c72416dc6..1b7c21935ce5c 100644 --- a/examples/chart/teleport-cluster/charts/teleport-operator/templates/role.yaml +++ b/examples/chart/teleport-cluster/charts/teleport-operator/templates/role.yaml @@ -36,6 +36,8 @@ rules: - teleportopensshserversv2/status - teleportopenssheiceserversv2 - teleportopenssheiceserversv2/status + - teleporttrustedclustersv2 + - teleporttrustedclustersv2/status verbs: - get - list diff --git a/examples/chart/teleport-cluster/templates/auth/config.yaml b/examples/chart/teleport-cluster/templates/auth/config.yaml index 99fe59e061c9c..d1c4bffcb5cf6 100644 --- a/examples/chart/teleport-cluster/templates/auth/config.yaml +++ b/examples/chart/teleport-cluster/templates/auth/config.yaml @@ -131,6 +131,14 @@ data: - read - update - delete + - resources: + - trusted_cluster + verbs: + - list + - create + - read + - update + - delete deny: {} version: v7 --- diff --git a/integrations/operator/README.md b/integrations/operator/README.md index 8e91c62d6d46c..d240ca82da84b 100644 --- a/integrations/operator/README.md +++ b/integrations/operator/README.md @@ -20,6 +20,10 @@ The operator supports reconciling the following Kubernetes CRs: - TeleportRoleV7 (creates role v7) - TeleportProvisionToken - TeleportGithubConnector +- TeleportAccessList +- TeleportOpenSSHEICEServerV2 +- 
TeleportOpenSSHServerV2 +- TeleportTrustedClusterV2 - TeleportSAMLConnector [1] - TeleportOIDCConnector [1] - TeleportLoginRule [1] diff --git a/integrations/operator/apis/resources/v1/trusted_cluster_types.go b/integrations/operator/apis/resources/v1/trusted_cluster_types.go new file mode 100644 index 0000000000000..0f6b8f753fac2 --- /dev/null +++ b/integrations/operator/apis/resources/v1/trusted_cluster_types.go @@ -0,0 +1,96 @@ +/* + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/integrations/operator/apis/resources" +) + +func init() { + SchemeBuilder.Register(&TeleportTrustedClusterV2{}, &TeleportTrustedClusterV2List{}) +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// TeleportTrustedClusterV2 is the Schema for the trusted_clusters API +type TeleportTrustedClusterV2 struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec TeleportTrustedClusterV2Spec `json:"spec,omitempty"` + Status resources.Status `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// TeleportTrustedClusterV2List contains a list of TeleportTrustedClusterV2 +type TeleportTrustedClusterV2List struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TeleportTrustedClusterV2 `json:"items"` +} + +// ToTeleport converts the resource to the teleport trusted_cluster API type. +func (r TeleportTrustedClusterV2) ToTeleport() types.TrustedCluster { + return &types.TrustedClusterV2{ + Kind: types.KindTrustedCluster, + Version: types.V2, + Metadata: types.Metadata{ + Name: r.Name, + Labels: r.Labels, + Description: r.Annotations[resources.DescriptionKey], + }, + Spec: types.TrustedClusterSpecV2(r.Spec), + } +} + +// TeleportTrustedClusterV2Spec defines the desired state of TeleportTrustedClusterV2 +type TeleportTrustedClusterV2Spec types.TrustedClusterSpecV2 + +// Marshal serializes a spec into binary data. +func (spec *TeleportTrustedClusterV2Spec) Marshal() ([]byte, error) { + return (*types.TrustedClusterSpecV2)(spec).Marshal() +} + +// Unmarshal deserializes a spec from binary data. +func (spec *TeleportTrustedClusterV2Spec) Unmarshal(data []byte) error { + return (*types.TrustedClusterSpecV2)(spec).Unmarshal(data) +} + +// DeepCopyInto deep-copies one trusted_cluster spec into another. +// Required to satisfy runtime.Object interface. +func (spec *TeleportTrustedClusterV2Spec) DeepCopyInto(out *TeleportTrustedClusterV2Spec) { + data, err := spec.Marshal() + if err != nil { + panic(err) + } + *out = TeleportTrustedClusterV2Spec{} + if err = out.Unmarshal(data); err != nil { + panic(err) + } +} + +// StatusConditions returns a pointer to Status.Conditions slice. 
+func (r *TeleportTrustedClusterV2) StatusConditions() *[]metav1.Condition { + return &r.Status.Conditions +} diff --git a/integrations/operator/apis/resources/v1/zz_generated.deepcopy.go b/integrations/operator/apis/resources/v1/zz_generated.deepcopy.go index e2f6b7ce932c1..6b803d79d2577 100644 --- a/integrations/operator/apis/resources/v1/zz_generated.deepcopy.go +++ b/integrations/operator/apis/resources/v1/zz_generated.deepcopy.go @@ -605,3 +605,72 @@ func (in *TeleportRoleV7Spec) DeepCopy() *TeleportRoleV7Spec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TeleportTrustedClusterV2) DeepCopyInto(out *TeleportTrustedClusterV2) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeleportTrustedClusterV2. +func (in *TeleportTrustedClusterV2) DeepCopy() *TeleportTrustedClusterV2 { + if in == nil { + return nil + } + out := new(TeleportTrustedClusterV2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TeleportTrustedClusterV2) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TeleportTrustedClusterV2List) DeepCopyInto(out *TeleportTrustedClusterV2List) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TeleportTrustedClusterV2, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeleportTrustedClusterV2List. +func (in *TeleportTrustedClusterV2List) DeepCopy() *TeleportTrustedClusterV2List { + if in == nil { + return nil + } + out := new(TeleportTrustedClusterV2List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TeleportTrustedClusterV2List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeleportTrustedClusterV2Spec. 
+func (in *TeleportTrustedClusterV2Spec) DeepCopy() *TeleportTrustedClusterV2Spec { + if in == nil { + return nil + } + out := new(TeleportTrustedClusterV2Spec) + in.DeepCopyInto(out) + return out +} diff --git a/integrations/operator/config/crd/bases/resources.teleport.dev_trustedclustersv2.yaml b/integrations/operator/config/crd/bases/resources.teleport.dev_trustedclustersv2.yaml new file mode 100644 index 0000000000000..4cf1410472b64 --- /dev/null +++ b/integrations/operator/config/crd/bases/resources.teleport.dev_trustedclustersv2.yaml @@ -0,0 +1,149 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: teleporttrustedclustersv2.resources.teleport.dev +spec: + group: resources.teleport.dev + names: + kind: TeleportTrustedClusterV2 + listKind: TeleportTrustedClusterV2List + plural: teleporttrustedclustersv2 + shortNames: + - trustedclusterv2 + - trustedclustersv2 + singular: teleporttrustedclusterv2 + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: TrustedClusterV2 is the Schema for the trustedclustersv2 API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TrustedCluster resource definition v2 from Teleport + properties: + enabled: + description: Enabled is a bool that indicates if the TrustedCluster + is enabled or disabled. Setting Enabled to false has a side effect + of deleting the user and host certificate authority (CA). + type: boolean + role_map: + description: RoleMap specifies role mappings to remote roles. + items: + properties: + local: + description: Local specifies local roles to map to + items: + type: string + nullable: true + type: array + remote: + description: Remote specifies remote role name to map from + type: string + type: object + type: array + token: + description: Token is the authorization token provided by another + cluster needed by this cluster to join. This field supports secret + lookup. See the operator documentation for more details. + type: string + tunnel_addr: + description: ReverseTunnelAddress is the address of the SSH proxy + server of the cluster to join. If not set, it is derived from `:`. + type: string + web_proxy_addr: + description: ProxyAddress is the address of the web proxy server of + the cluster to join. If not set, it is derived from `:`. + type: string + type: object + status: + description: Status defines the observed state of the Teleport resource + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. 
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/integrations/operator/controllers/reconcilers/legacy_resource_without_labels.go b/integrations/operator/controllers/reconcilers/legacy_resource_without_labels.go index c7079240a9d7b..307c1283ee398 100644 --- a/integrations/operator/controllers/reconcilers/legacy_resource_without_labels.go +++ b/integrations/operator/controllers/reconcilers/legacy_resource_without_labels.go @@ -62,7 +62,7 @@ func (a ResourceWithoutLabelsAdapter[T]) SetResourceRevision(res T, revision str } // SetResourceLabels implements the Adapter interface. As the resource does not -// // support labels, it only sets the origin label. +// support labels, it only sets the origin label. 
func (a ResourceWithoutLabelsAdapter[T]) SetResourceLabels(res T, labels map[string]string) { // We don't set all labels as the Resource doesn't support them // Only the origin diff --git a/integrations/operator/controllers/resources/setup.go b/integrations/operator/controllers/resources/setup.go index a2e78a8cdc68c..fffceccbf8c39 100644 --- a/integrations/operator/controllers/resources/setup.go +++ b/integrations/operator/controllers/resources/setup.go @@ -47,6 +47,7 @@ func SetupAllControllers(log logr.Logger, mgr manager.Manager, teleportClient *c {"TeleportProvisionToken", NewProvisionTokenReconciler}, {"TeleportOpenSSHServerV2", NewOpenSSHServerV2Reconciler}, {"TeleportOpenSSHEICEServerV2", NewOpenSSHEICEServerV2Reconciler}, + {"TeleportTrustedClusterV2", NewTrustedClusterV2Reconciler}, } oidc := modules.GetProtoEntitlement(features, entitlements.OIDC) diff --git a/integrations/operator/controllers/resources/testlib/env.go b/integrations/operator/controllers/resources/testlib/env.go index 9de19230826c3..e41a4f22677eb 100644 --- a/integrations/operator/controllers/resources/testlib/env.go +++ b/integrations/operator/controllers/resources/testlib/env.go @@ -139,6 +139,7 @@ func defaultTeleportServiceConfig(t *testing.T) (*helpers.TeleInstance, string) types.NewRule(types.KindOktaImportRule, unrestricted), types.NewRule(types.KindAccessList, unrestricted), types.NewRule(types.KindNode, unrestricted), + types.NewRule(types.KindTrustedCluster, unrestricted), }, }, }) diff --git a/integrations/operator/controllers/resources/trusted_cluster_controller.go b/integrations/operator/controllers/resources/trusted_cluster_controller.go new file mode 100644 index 0000000000000..a3154bed00b42 --- /dev/null +++ b/integrations/operator/controllers/resources/trusted_cluster_controller.go @@ -0,0 +1,91 @@ +/* + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package resources + +import ( + "context" + + "github.com/gravitational/trace" + kclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/gravitational/teleport/api/client" + "github.com/gravitational/teleport/api/types" + resourcesv1 "github.com/gravitational/teleport/integrations/operator/apis/resources/v1" + "github.com/gravitational/teleport/integrations/operator/controllers" + "github.com/gravitational/teleport/integrations/operator/controllers/reconcilers" + "github.com/gravitational/teleport/integrations/operator/controllers/resources/secretlookup" +) + +// trustedClusterClient implements TeleportResourceClient and offers CRUD +// methods needed to reconcile trusted_clusters. +type trustedClusterClient struct { + teleportClient *client.Client + kubeClient kclient.Client +} + +// Get gets the Teleport trusted_cluster of a given name. 
+func (r trustedClusterClient) Get(ctx context.Context, name string) (types.TrustedCluster, error) { + trustedCluster, err := r.teleportClient.GetTrustedCluster(ctx, name) + return trustedCluster, trace.Wrap(err) +} + +// Create creates a Teleport trusted_cluster. +func (r trustedClusterClient) Create(ctx context.Context, trustedCluster types.TrustedCluster) error { + _, err := r.teleportClient.CreateTrustedCluster(ctx, trustedCluster) + return trace.Wrap(err) +} + +// Update updates a Teleport trusted_cluster. +func (r trustedClusterClient) Update(ctx context.Context, trustedCluster types.TrustedCluster) error { + _, err := r.teleportClient.UpdateTrustedCluster(ctx, trustedCluster) + return trace.Wrap(err) +} + +// Delete deletes a Teleport trusted_cluster. +func (r trustedClusterClient) Delete(ctx context.Context, name string) error { + return trace.Wrap(r.teleportClient.DeleteTrustedCluster(ctx, name)) +} + +// Mutate mutates a Teleport trusted_cluster. +func (r trustedClusterClient) Mutate(ctx context.Context, new, existing types.TrustedCluster, crKey kclient.ObjectKey) error { + secret := new.GetToken() + if secretlookup.IsNeeded(secret) { + resolvedSecret, err := secretlookup.Try(ctx, r.kubeClient, crKey.Name, crKey.Namespace, secret) + if err != nil { + return trace.Wrap(err) + } + new.SetToken(resolvedSecret) + } + return nil +} + +// NewTrustedClusterV2Reconciler instantiates a new Kubernetes controller reconciling trusted_cluster v2 resources +func NewTrustedClusterV2Reconciler(client kclient.Client, tClient *client.Client) (controllers.Reconciler, error) { + trustedClusterClient := &trustedClusterClient{ + teleportClient: tClient, + kubeClient: client, + } + + resourceReconciler, err := reconcilers.NewTeleportResourceWithoutLabelsReconciler[types.TrustedCluster, *resourcesv1.TeleportTrustedClusterV2]( + client, + trustedClusterClient, + ) + + return resourceReconciler, trace.Wrap(err, "building teleport resource reconciler") +} diff --git a/integrations/operator/controllers/resources/trusted_clusterv2_controller_test.go b/integrations/operator/controllers/resources/trusted_clusterv2_controller_test.go new file mode 100644 index 0000000000000..a3b1dc98de5ba --- /dev/null +++ b/integrations/operator/controllers/resources/trusted_clusterv2_controller_test.go @@ -0,0 +1,356 @@ +/* + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +package resources_test + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + "github.com/gravitational/trace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + kclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/integration/helpers" + resourcesv1 "github.com/gravitational/teleport/integrations/operator/apis/resources/v1" + "github.com/gravitational/teleport/integrations/operator/controllers/reconcilers" + "github.com/gravitational/teleport/integrations/operator/controllers/resources/secretlookup" + "github.com/gravitational/teleport/integrations/operator/controllers/resources/testlib" + "github.com/gravitational/teleport/lib" + "github.com/gravitational/teleport/lib/service/servicecfg" + "github.com/gravitational/teleport/lib/utils" +) + +type trustedClusterV2TestingPrimitives struct { + // remoteCluster specifies the remote trusted cluster instance. + remoteCluster *helpers.TeleInstance + // trustedClusterSpec specifies the trusted cluster specs. + trustedClusterSpec types.TrustedClusterSpecV2 + + setup *testSetup + reconcilers.ResourceWithoutLabelsAdapter[types.TrustedCluster] +} + +func (r *trustedClusterV2TestingPrimitives) Init(setup *testSetup) { + r.setup = setup +} + +func (r *trustedClusterV2TestingPrimitives) SetupTeleportFixtures(ctx context.Context) error { + return nil +} + +func (r *trustedClusterV2TestingPrimitives) CreateTeleportResource(ctx context.Context, name string) error { + trustedCluster, err := types.NewTrustedCluster(name, r.trustedClusterSpec) + if err != nil { + return trace.Wrap(err) + } + trustedCluster.SetOrigin(types.OriginKubernetes) + _, err = r.setup.TeleportClient.CreateTrustedCluster(ctx, trustedCluster) + return trace.Wrap(err) +} + +func (r *trustedClusterV2TestingPrimitives) GetTeleportResource(ctx context.Context, name string) (types.TrustedCluster, error) { + return r.setup.TeleportClient.GetTrustedCluster(ctx, name) +} + +func (r *trustedClusterV2TestingPrimitives) DeleteTeleportResource(ctx context.Context, name string) error { + return trace.Wrap(r.setup.TeleportClient.DeleteTrustedCluster(ctx, name)) +} + +func (r *trustedClusterV2TestingPrimitives) CreateKubernetesResource(ctx context.Context, name string) error { + trustedCluster := &resourcesv1.TeleportTrustedClusterV2{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: r.setup.Namespace.Name, + }, + Spec: resourcesv1.TeleportTrustedClusterV2Spec(r.trustedClusterSpec), + } + return trace.Wrap(r.setup.K8sClient.Create(ctx, trustedCluster)) +} + +func (r *trustedClusterV2TestingPrimitives) DeleteKubernetesResource(ctx context.Context, name string) error { + trustedCluster := &resourcesv1.TeleportTrustedClusterV2{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: r.setup.Namespace.Name, + }, + } + return trace.Wrap(r.setup.K8sClient.Delete(ctx, trustedCluster)) +} + +func (r *trustedClusterV2TestingPrimitives) GetKubernetesResource(ctx context.Context, name string) (*resourcesv1.TeleportTrustedClusterV2, error) { + trustedCluster := &resourcesv1.TeleportTrustedClusterV2{} + obj := kclient.ObjectKey{ + Name: name, + Namespace: r.setup.Namespace.Name, + } + err := r.setup.K8sClient.Get(ctx, obj, 
trustedCluster) + return trustedCluster, trace.Wrap(err) +} + +func (r *trustedClusterV2TestingPrimitives) ModifyKubernetesResource(ctx context.Context, name string) error { + trustedCluster, err := r.GetKubernetesResource(ctx, name) + if err != nil { + return trace.Wrap(err) + } + trustedCluster.Spec.RoleMap[0] = types.RoleMapping{ + Remote: "remote-admin", + Local: []string{"local-dev"}, + } + return trace.Wrap(r.setup.K8sClient.Update(ctx, trustedCluster)) +} + +func (r *trustedClusterV2TestingPrimitives) CompareTeleportAndKubernetesResource(tResource types.TrustedCluster, kubeResource *resourcesv1.TeleportTrustedClusterV2) (bool, string) { + diff := cmp.Diff(tResource, kubeResource.ToTeleport(), testlib.CompareOptions()...) + return diff == "", diff +} + +// setupTest initializes a remote cluster for testing trusted clusters. +func (r *trustedClusterV2TestingPrimitives) setupTest(t *testing.T, clusterName string) { + ctx := context.Background() + + remoteCluster := helpers.NewInstance(t, helpers.InstanceConfig{ + ClusterName: clusterName, + HostID: uuid.New().String(), + NodeName: helpers.Loopback, + Logger: utils.NewSlogLoggerForTests(), + }) + r.remoteCluster = remoteCluster + + rcConf := servicecfg.MakeDefaultConfig() + rcConf.DataDir = t.TempDir() + rcConf.Auth.Enabled = true + rcConf.Proxy.Enabled = true + rcConf.Proxy.DisableWebInterface = true + rcConf.Version = "v2" + + lib.SetInsecureDevMode(true) + t.Cleanup(func() { lib.SetInsecureDevMode(false) }) + + require.NoError(t, remoteCluster.CreateEx(t, nil, rcConf)) + require.NoError(t, remoteCluster.Start()) + t.Cleanup(func() { require.NoError(t, remoteCluster.StopAll()) }) + + // Create trusted cluster join token + token := "secret_token" + tokenResource, err := types.NewProvisionToken(token, []types.SystemRole{types.RoleTrustedCluster}, time.Time{}) + require.NoError(t, err) + remoteCluster.Process.GetAuthServer().UpsertToken(ctx, tokenResource) + + // Create required role + localDev := "local-dev" + require.NoError(t, teleportCreateDummyRole(ctx, localDev, r.setup.TeleportClient)) + + r.trustedClusterSpec = types.TrustedClusterSpecV2{ + Enabled: true, + Token: token, + ProxyAddress: remoteCluster.Web, + ReverseTunnelAddress: remoteCluster.ReverseTunnel, + RoleMap: []types.RoleMapping{ + { + Remote: "remote-dev", + Local: []string{localDev}, + }, + }, + } +} + +func TestTrustedClusterV2Creation(t *testing.T) { + test := &trustedClusterV2TestingPrimitives{} + setup := testlib.SetupTestEnv(t) + test.Init(setup) + ctx := context.Background() + + resourceName := "remote.example.com" + test.setupTest(t, resourceName) + + require.NoError(t, test.CreateKubernetesResource(ctx, resourceName)) + + var resource types.TrustedCluster + var err error + testlib.FastEventually(t, func() bool { + resource, err = test.GetTeleportResource(ctx, resourceName) + return !trace.IsNotFound(err) + }) + require.NoError(t, err) + require.Equal(t, resourceName, test.GetResourceName(resource)) + require.Equal(t, types.OriginKubernetes, test.GetResourceOrigin(resource)) + + err = test.DeleteKubernetesResource(ctx, resourceName) + require.NoError(t, err) + + testlib.FastEventually(t, func() bool { + _, err = test.GetTeleportResource(ctx, resourceName) + return trace.IsNotFound(err) + }) +} + +func TestTrustedClusterV2DeletionDrift(t *testing.T) { + test := &trustedClusterV2TestingPrimitives{} + setup := testlib.SetupTestEnv(t) + test.Init(setup) + ctx := context.Background() + + resourceName := "remote.example.com" + test.setupTest(t, resourceName) + + 
require.NoError(t, test.CreateKubernetesResource(ctx, resourceName)) + + var resource types.TrustedCluster + var err error + testlib.FastEventually(t, func() bool { + resource, err = test.GetTeleportResource(ctx, resourceName) + return !trace.IsNotFound(err) + }) + require.NoError(t, err) + require.Equal(t, resourceName, test.GetResourceName(resource)) + require.Equal(t, types.OriginKubernetes, test.GetResourceOrigin(resource)) + + // We cause a drift by altering the Teleport resource. + // To make sure the operator does not reconcile while we're finished we suspend the operator + setup.StopKubernetesOperator() + + err = test.DeleteTeleportResource(ctx, resourceName) + require.NoError(t, err) + testlib.FastEventually(t, func() bool { + _, err = test.GetTeleportResource(ctx, resourceName) + return trace.IsNotFound(err) + }) + + // We flag the resource for deletion in Kubernetes (it won't be fully removed until the operator has processed it and removed the finalizer) + err = test.DeleteKubernetesResource(ctx, resourceName) + require.NoError(t, err) + + // Test section: We resume the operator, it should reconcile and recover from the drift + setup.StartKubernetesOperator(t) + + // The operator should handle the failed Teleport deletion gracefully and unlock the Kubernetes resource deletion + testlib.FastEventually(t, func() bool { + _, err = test.GetKubernetesResource(ctx, resourceName) + return kerrors.IsNotFound(err) + }) +} + +func TestTrustedClusterV2Update(t *testing.T) { + test := &trustedClusterV2TestingPrimitives{} + setup := testlib.SetupTestEnv(t) + test.Init(setup) + ctx := context.Background() + + resourceName := "remote.example.com" + test.setupTest(t, resourceName) + + // The resource is created in Teleport + require.NoError(t, test.CreateTeleportResource(ctx, resourceName)) + + // The resource is created in Kubernetes, with at least a field altered + require.NoError(t, test.CreateKubernetesResource(ctx, resourceName)) + + // Check the resource was updated in Teleport + testlib.FastEventuallyWithT(t, func(c *assert.CollectT) { + tResource, err := test.GetTeleportResource(ctx, resourceName) + require.NoError(c, err) + + kubeResource, err := test.GetKubernetesResource(ctx, resourceName) + require.NoError(c, err) + + // Kubernetes and Teleport resources are in-sync + equal, diff := test.CompareTeleportAndKubernetesResource(tResource, kubeResource) + if !equal { + t.Logf("Kubernetes and Teleport resources not sync-ed yet: %s", diff) + } + assert.True(c, equal) + }) + + // Updating the resource in Kubernetes + // The modification can fail because of a conflict with the resource controller. We retry if that happens. + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + return test.ModifyKubernetesResource(ctx, resourceName) + }) + require.NoError(t, err) + + // Check the resource was updated in Teleport + testlib.FastEventuallyWithT(t, func(c *assert.CollectT) { + kubeResource, err := test.GetKubernetesResource(ctx, resourceName) + require.NoError(c, err) + + tResource, err := test.GetTeleportResource(ctx, resourceName) + require.NoError(c, err) + + // Kubernetes and Teleport resources are in-sync + equal, diff := test.CompareTeleportAndKubernetesResource(tResource, kubeResource) + if !equal { + t.Logf("Kubernetes and Teleport resources not sync-ed yet: %s", diff) + } + assert.True(c, equal) + }) + + // Delete the resource to avoid leftover state. 
+ err = test.DeleteTeleportResource(ctx, resourceName) + require.NoError(t, err) +} + +func TestTrustedClusterV2SecretLookup(t *testing.T) { + test := &trustedClusterV2TestingPrimitives{} + setup := testlib.SetupTestEnv(t) + test.Init(setup) + ctx := context.Background() + + resourceName := "remote.example.com" + test.setupTest(t, resourceName) + + secretName := validRandomResourceName("trusted-cluster-secret") + secretKey := "token" + secretValue := test.trustedClusterSpec.Token + + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: setup.Namespace.Name, + Annotations: map[string]string{ + secretlookup.AllowLookupAnnotation: resourceName, + }, + }, + StringData: map[string]string{ + secretKey: secretValue, + }, + Type: v1.SecretTypeOpaque, + } + kubeClient := setup.K8sClient + require.NoError(t, kubeClient.Create(ctx, secret)) + + test.trustedClusterSpec.Token = "secret://" + secretName + "/" + secretKey + require.NoError(t, test.CreateKubernetesResource(ctx, resourceName)) + + testlib.FastEventually(t, func() bool { + trustedCluster, err := test.GetTeleportResource(ctx, resourceName) + if err != nil { + return false + } + return trustedCluster.GetToken() == secretValue + }) +} diff --git a/integrations/operator/crdgen/additional_doc.go b/integrations/operator/crdgen/additional_doc.go index 2458af6779bbc..396c94410808f 100644 --- a/integrations/operator/crdgen/additional_doc.go +++ b/integrations/operator/crdgen/additional_doc.go @@ -29,4 +29,7 @@ var additionalDescription = map[string]map[string]string{ "OIDCConnectorSpecV3": { "ClientSecret": supportsSecretLookupDescription, }, + "TrustedClusterSpecV2": { + "Token": supportsSecretLookupDescription, + }, } diff --git a/integrations/operator/crdgen/handlerequest.go b/integrations/operator/crdgen/handlerequest.go index 66d90324cc0f4..57f479de185e3 100644 --- a/integrations/operator/crdgen/handlerequest.go +++ b/integrations/operator/crdgen/handlerequest.go @@ -213,6 +213,7 @@ func generateSchema(file *File, groupName string, format crdFormatFunc, resp *go withAdditionalColumns(serverColumns), }, }, + {name: "TrustedClusterV2", opts: []resourceSchemaOption{withVersionInKindOverride()}}, } for _, resource := range resources { diff --git a/integrations/operator/crdgen/ignored.go b/integrations/operator/crdgen/ignored.go index 596e79b4d291a..7b647756f12cc 100644 --- a/integrations/operator/crdgen/ignored.go +++ b/integrations/operator/crdgen/ignored.go @@ -44,4 +44,7 @@ var ignoredFields = map[string]stringSet{ // allows remote exec on agentful nodes. "CmdLabels": struct{}{}, }, + "TrustedClusterSpecV2": { + "Roles": struct{}{}, // Deprecated, use RoleMap instead. + }, } diff --git a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_openssheiceserversv2.yaml b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_openssheiceserversv2.yaml index 3617909ae6a67..bad8469a76fb6 100644 --- a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_openssheiceserversv2.yaml +++ b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_openssheiceserversv2.yaml @@ -88,6 +88,20 @@ spec: type: string type: object type: object + github: + description: GitHub contains info about GitHub proxies where each + server represents a GitHub organization. + nullable: true + properties: + integration: + description: Integration is the integration that is associated + with this Server. 
+ type: string + organization: + description: Organization specifies the name of the organization + for the GitHub integration. + type: string + type: object hostname: description: Hostname is server hostname type: string diff --git a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_opensshserversv2.yaml b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_opensshserversv2.yaml index ad7dfd4174776..fe3d76a8db7a4 100644 --- a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_opensshserversv2.yaml +++ b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_opensshserversv2.yaml @@ -87,6 +87,20 @@ spec: type: string type: object type: object + github: + description: GitHub contains info about GitHub proxies where each + server represents a GitHub organization. + nullable: true + properties: + integration: + description: Integration is the integration that is associated + with this Server. + type: string + organization: + description: Organization specifies the name of the organization + for the GitHub integration. + type: string + type: object hostname: description: Hostname is server hostname type: string diff --git a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_roles.yaml b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_roles.yaml index 5d1c5ddfb9809..9e3a0f46e9334 100644 --- a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_roles.yaml +++ b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_roles.yaml @@ -157,6 +157,18 @@ spec: type: string nullable: true type: array + github_permissions: + description: GitHubPermissions defines GitHub integration related + permissions. + items: + properties: + orgs: + items: + type: string + nullable: true + type: array + type: object + type: array group_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true @@ -595,6 +607,17 @@ spec: type: string nullable: true type: array + workload_identity_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WorkloadIdentityLabels controls whether or not specific + WorkloadIdentity resources can be invoked. Further authorization + controls exist on the WorkloadIdentity resource itself. + type: object + workload_identity_labels_expression: + description: WorkloadIdentityLabelsExpression is a predicate expression + used to allow/deny access to issuing a WorkloadIdentity. + type: string type: object deny: description: Deny is the set of conditions evaluated to deny access. @@ -722,6 +745,18 @@ spec: type: string nullable: true type: array + github_permissions: + description: GitHubPermissions defines GitHub integration related + permissions. + items: + properties: + orgs: + items: + type: string + nullable: true + type: array + type: object + type: array group_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true @@ -1160,6 +1195,17 @@ spec: type: string nullable: true type: array + workload_identity_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WorkloadIdentityLabels controls whether or not specific + WorkloadIdentity resources can be invoked. Further authorization + controls exist on the WorkloadIdentity resource itself. + type: object + workload_identity_labels_expression: + description: WorkloadIdentityLabelsExpression is a predicate expression + used to allow/deny access to issuing a WorkloadIdentity. 
+ type: string type: object options: description: Options is for OpenSSH options like agent forwarding. @@ -1584,6 +1630,18 @@ spec: type: string nullable: true type: array + github_permissions: + description: GitHubPermissions defines GitHub integration related + permissions. + items: + properties: + orgs: + items: + type: string + nullable: true + type: array + type: object + type: array group_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true @@ -2022,6 +2080,17 @@ spec: type: string nullable: true type: array + workload_identity_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WorkloadIdentityLabels controls whether or not specific + WorkloadIdentity resources can be invoked. Further authorization + controls exist on the WorkloadIdentity resource itself. + type: object + workload_identity_labels_expression: + description: WorkloadIdentityLabelsExpression is a predicate expression + used to allow/deny access to issuing a WorkloadIdentity. + type: string type: object deny: description: Deny is the set of conditions evaluated to deny access. @@ -2149,6 +2218,18 @@ spec: type: string nullable: true type: array + github_permissions: + description: GitHubPermissions defines GitHub integration related + permissions. + items: + properties: + orgs: + items: + type: string + nullable: true + type: array + type: object + type: array group_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true @@ -2587,6 +2668,17 @@ spec: type: string nullable: true type: array + workload_identity_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WorkloadIdentityLabels controls whether or not specific + WorkloadIdentity resources can be invoked. Further authorization + controls exist on the WorkloadIdentity resource itself. + type: object + workload_identity_labels_expression: + description: WorkloadIdentityLabelsExpression is a predicate expression + used to allow/deny access to issuing a WorkloadIdentity. + type: string type: object options: description: Options is for OpenSSH options like agent forwarding. diff --git a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_rolesv6.yaml b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_rolesv6.yaml index f0af70fc7cf2f..5e1ff2a359184 100644 --- a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_rolesv6.yaml +++ b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_rolesv6.yaml @@ -160,6 +160,18 @@ spec: type: string nullable: true type: array + github_permissions: + description: GitHubPermissions defines GitHub integration related + permissions. + items: + properties: + orgs: + items: + type: string + nullable: true + type: array + type: object + type: array group_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true @@ -598,6 +610,17 @@ spec: type: string nullable: true type: array + workload_identity_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WorkloadIdentityLabels controls whether or not specific + WorkloadIdentity resources can be invoked. Further authorization + controls exist on the WorkloadIdentity resource itself. + type: object + workload_identity_labels_expression: + description: WorkloadIdentityLabelsExpression is a predicate expression + used to allow/deny access to issuing a WorkloadIdentity. + type: string type: object deny: description: Deny is the set of conditions evaluated to deny access. 
@@ -725,6 +748,18 @@ spec: type: string nullable: true type: array + github_permissions: + description: GitHubPermissions defines GitHub integration related + permissions. + items: + properties: + orgs: + items: + type: string + nullable: true + type: array + type: object + type: array group_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true @@ -1163,6 +1198,17 @@ spec: type: string nullable: true type: array + workload_identity_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WorkloadIdentityLabels controls whether or not specific + WorkloadIdentity resources can be invoked. Further authorization + controls exist on the WorkloadIdentity resource itself. + type: object + workload_identity_labels_expression: + description: WorkloadIdentityLabelsExpression is a predicate expression + used to allow/deny access to issuing a WorkloadIdentity. + type: string type: object options: description: Options is for OpenSSH options like agent forwarding. diff --git a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_rolesv7.yaml b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_rolesv7.yaml index 88056b0b54a53..fb682402d11e3 100644 --- a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_rolesv7.yaml +++ b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_rolesv7.yaml @@ -160,6 +160,18 @@ spec: type: string nullable: true type: array + github_permissions: + description: GitHubPermissions defines GitHub integration related + permissions. + items: + properties: + orgs: + items: + type: string + nullable: true + type: array + type: object + type: array group_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true @@ -598,6 +610,17 @@ spec: type: string nullable: true type: array + workload_identity_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WorkloadIdentityLabels controls whether or not specific + WorkloadIdentity resources can be invoked. Further authorization + controls exist on the WorkloadIdentity resource itself. + type: object + workload_identity_labels_expression: + description: WorkloadIdentityLabelsExpression is a predicate expression + used to allow/deny access to issuing a WorkloadIdentity. + type: string type: object deny: description: Deny is the set of conditions evaluated to deny access. @@ -725,6 +748,18 @@ spec: type: string nullable: true type: array + github_permissions: + description: GitHubPermissions defines GitHub integration related + permissions. + items: + properties: + orgs: + items: + type: string + nullable: true + type: array + type: object + type: array group_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true @@ -1163,6 +1198,17 @@ spec: type: string nullable: true type: array + workload_identity_labels: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: WorkloadIdentityLabels controls whether or not specific + WorkloadIdentity resources can be invoked. Further authorization + controls exist on the WorkloadIdentity resource itself. + type: object + workload_identity_labels_expression: + description: WorkloadIdentityLabelsExpression is a predicate expression + used to allow/deny access to issuing a WorkloadIdentity. + type: string type: object options: description: Options is for OpenSSH options like agent forwarding. 
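The CRD, RBAC rule, and reconciler introduced above let the operator manage trusted clusters declaratively from a Kubernetes manifest. A minimal sketch of such a manifest follows, assuming a root cluster named root.example.com listening on the default proxy ports; the addresses, namespace, token, and role names are illustrative placeholders rather than values taken from this patch, and the field names mirror the TeleportTrustedClusterV2 schema defined earlier in this series (enabled, token, web_proxy_addr, tunnel_addr, role_map).

# Illustrative sketch only; cluster addresses, namespace, and role names are hypothetical.
apiVersion: resources.teleport.dev/v1
kind: TeleportTrustedClusterV2
metadata:
  # The resource name is expected to match the root cluster name, as in the tests above.
  name: root.example.com
  namespace: teleport
spec:
  enabled: true
  # A plain join token, or a secret lookup such as "secret://my-secret/token";
  # the referenced Secret must be annotated to allow the lookup, as exercised
  # in TestTrustedClusterV2SecretLookup above.
  token: secret_token
  web_proxy_addr: root.example.com:443
  tunnel_addr: root.example.com:3024
  role_map:
    - remote: remote-dev
      local: ["local-dev"]

Applying such a manifest causes the TrustedClusterV2 reconciler added in this patch to create or update the corresponding trusted_cluster resource in Teleport, in the same way the other operator-managed resources listed in the README are reconciled.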
diff --git a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_trustedclustersv2.yaml b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_trustedclustersv2.yaml new file mode 100644 index 0000000000000..4cf1410472b64 --- /dev/null +++ b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_trustedclustersv2.yaml @@ -0,0 +1,149 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: teleporttrustedclustersv2.resources.teleport.dev +spec: + group: resources.teleport.dev + names: + kind: TeleportTrustedClusterV2 + listKind: TeleportTrustedClusterV2List + plural: teleporttrustedclustersv2 + shortNames: + - trustedclusterv2 + - trustedclustersv2 + singular: teleporttrustedclusterv2 + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: TrustedClusterV2 is the Schema for the trustedclustersv2 API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TrustedCluster resource definition v2 from Teleport + properties: + enabled: + description: Enabled is a bool that indicates if the TrustedCluster + is enabled or disabled. Setting Enabled to false has a side effect + of deleting the user and host certificate authority (CA). + type: boolean + role_map: + description: RoleMap specifies role mappings to remote roles. + items: + properties: + local: + description: Local specifies local roles to map to + items: + type: string + nullable: true + type: array + remote: + description: Remote specifies remote role name to map from + type: string + type: object + type: array + token: + description: Token is the authorization token provided by another + cluster needed by this cluster to join. This field supports secret + lookup. See the operator documentation for more details. + type: string + tunnel_addr: + description: ReverseTunnelAddress is the address of the SSH proxy + server of the cluster to join. If not set, it is derived from `:`. + type: string + web_proxy_addr: + description: ProxyAddress is the address of the web proxy server of + the cluster to join. If not set, it is derived from `:`. + type: string + type: object + status: + description: Status defines the observed state of the Teleport resource + properties: + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + teleportResourceID: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_users.yaml b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_users.yaml index 504c3695c4532..0c68b6dec714f 100644 --- a/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_users.yaml +++ b/integrations/operator/crdgen/testdata/golden/resources.teleport.dev_users.yaml @@ -57,6 +57,10 @@ spec: description: SAMLSingleLogoutURL is the SAML Single log-out URL to initiate SAML SLO (single log-out), if applicable. type: string + user_id: + description: UserID is the ID of the identity. Some connectors + like GitHub have an unique ID apart from the username. + type: string username: description: Username is username supplied by external identity provider @@ -76,6 +80,10 @@ spec: description: SAMLSingleLogoutURL is the SAML Single log-out URL to initiate SAML SLO (single log-out), if applicable. type: string + user_id: + description: UserID is the ID of the identity. Some connectors + like GitHub have an unique ID apart from the username. + type: string username: description: Username is username supplied by external identity provider @@ -101,6 +109,10 @@ spec: description: SAMLSingleLogoutURL is the SAML Single log-out URL to initiate SAML SLO (single log-out), if applicable. type: string + user_id: + description: UserID is the ID of the identity. Some connectors + like GitHub have an unique ID apart from the username. 
+ type: string username: description: Username is username supplied by external identity provider diff --git a/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/client/proto/authservice.proto b/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/client/proto/authservice.proto index 03b6f9ac35439..fc6dc146ff248 100644 --- a/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/client/proto/authservice.proto +++ b/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/client/proto/authservice.proto @@ -333,7 +333,13 @@ message RouteToApp { // GCPServiceAccount is the GCP service account to assume when accessing GCP API. string GCPServiceAccount = 7 [(gogoproto.jsontag) = "gcp_service_account,omitempty"]; // URI is the URI of the app. This is the internal endpoint where the application is running and isn't user-facing. + // Used merely for audit events and mirrors the URI from the app spec. Not used as a source of + // truth when routing connections. string URI = 8 [(gogoproto.jsontag) = "uri,omitempty"]; + // TargetPort signifies that the cert grants access to a specific port in a multi-port TCP app, as + // long as the port is defined in the app spec. When specified, it must be between 1 and 65535. + // Used only for routing, should not be used in other contexts (e.g., access requests). + uint32 TargetPort = 9 [(gogoproto.jsontag) = "target_port,omitempty"]; } // GetUserRequest specifies parameters for the GetUser method. @@ -1934,6 +1940,60 @@ message CreateRegisterChallengeRequest { DeviceUsage DeviceUsage = 3 [(gogoproto.jsontag) = "device_usage,omitempty"]; } +// IdentityCenterAccount holds information about an Identity Center account +// within an IdentityCenterAccountAssignment +message IdentityCenterAccount { + // ID is the AWS-assigned account ID + string ID = 1; + + // ARN is the full Amazon Resource Name for the AWS account + string ARN = 2; + + // AccountName is the human-readable name of the account + string AccountName = 3; + + // Description is a free text description of the account + string Description = 4; +} + +// IdentityCenterPermissionSet holds information about an Identity Center +// permission set within an IdentityCenterAccountAssignment +message IdentityCenterPermissionSet { + // ARN is the full Amazon Resource Name for the Permission Set + string ARN = 1; + + // Name is the human readable name for the Permission Set + string Name = 2; +} + +// IdentityCenterAccountAssignment represents a requestable Identity Center +// Account Assignment. This is strictly a wire-format object for use with the +// Unfied resource cache, and the types defined in the `identitycenter` package +// should be used for actual processing. +message IdentityCenterAccountAssignment { + // Kind is the database server resource kind. + string Kind = 1 [(gogoproto.jsontag) = "kind"]; + // SubKind is an optional resource subkind. + string SubKind = 2 [(gogoproto.jsontag) = "sub_kind,omitempty"]; + // Version is the resource version. + string Version = 3 [(gogoproto.jsontag) = "version"]; + // Metadata is the account metadata. 
+ types.Metadata Metadata = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "metadata" + ]; + + // DisplayName is a human-readable name for the Account assignment + string DisplayName = 5; + + // Account is the Identity Center Account this assigment references + IdentityCenterAccount Account = 6; + + // PermissionSet is the Identity Center Permission Set this assignment + // references + IdentityCenterPermissionSet PermissionSet = 7; +} + // PaginatedResource represents one of the supported resources. message PaginatedResource { // Resource is the resource itself. @@ -1962,6 +2022,12 @@ message PaginatedResource { types.AppServerOrSAMLIdPServiceProviderV1 AppServerOrSAMLIdPServiceProvider = 11 [deprecated = true]; // SAMLIdPServiceProvider represents a SAML IdP service provider resource. types.SAMLIdPServiceProviderV1 SAMLIdPServiceProvider = 12 [(gogoproto.jsontag) = "saml_idp_service_provider,omitempty"]; + // GitServer represents a Git server resource. + types.ServerV2 git_server = 15; + + // IdentityCenterAccountAssignment represents a requestable Identity Center + // Account Assignment + IdentityCenterAccountAssignment IdentityCenterAccountAssignment = 16 [(gogoproto.jsontag) = "identity_center_account_assignment,omitempty"]; } // Logins allowed for the included resource. Only to be populated for SSH and Desktops. @@ -2079,6 +2145,36 @@ message ListResourcesRequest { bool IncludeLogins = 13 [(gogoproto.jsontag) = "include_logins,omitempty"]; } +// ResolveSSHTargetRequest provides details about a server to be resolved in +// an equivalent manner to a ssh dial request. +// +// Resolution can happen in two modes: +// 1) searching for hosts based on labels, a predicate expression, or keywords +// 2) searching based on hostname +// +// If a Host is provided, resolution will only operate in the second mode and +// will not perform any resolution based on labels. In order to resolve via +// labels the Host must not be populated. +message ResolveSSHTargetRequest { + // The target host as would be sent to the proxy during a dial request. + string host = 1; + // The ssh port. This value is optional, and both empty string and "0" are typically + // treated as meaning that any port should match. + string port = 2; + // If not empty, a label-based matcher. + map labels = 3; + // Boolean conditions that will be matched against the resource. + string predicate_expression = 4; + // A list of search keywords to match against resource field values. + repeated string search_keywords = 5; +} + +// GetSSHTargetsResponse holds ssh servers that match an ssh targets request. +message ResolveSSHTargetResponse { + // The target matching the supplied request. + types.ServerV2 server = 1; +} + // GetSSHTargetsRequest gets all servers that might match an equivalent ssh dial request. message GetSSHTargetsRequest { // Host is the target host as would be sent to the proxy during a dial request. @@ -2349,16 +2445,21 @@ message DownstreamInventoryOneOf { } } -// DownstreamInventoryPing is sent down the inventory control stream for testing/debug -// purposes. +// DownstreamInventoryPing is sent down the inventory control stream. message DownstreamInventoryPing { uint64 ID = 1; } // UpstreamInventoryPong is sent up the inventory control stream in response to a downstream -// ping (used for testing/debug purposes). +// ping including the system clock of the downstream. message UpstreamInventoryPong { uint64 ID = 1; + // SystemClock advertises the system clock of the upstream. 
+ google.protobuf.Timestamp SystemClock = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "system_clock,omitempty" + ]; } // UpstreamInventoryHello is the hello message sent up the inventory control stream. @@ -2553,10 +2654,10 @@ message InventoryPingRequest { // ServerID is the ID of the instance to ping. string ServerID = 1; - // ControlLog forces the ping to use the standard "commit then act" model of control log synchronization - // for the ping. This significantly increases the amount of time it takes for the ping request to - // complete, but is useful for testing/debugging control log issues. - bool ControlLog = 2; + // ControlLog used to signal that the ping should use the control log synchronization. + // + // Deprecated: the control log is unsupported and unsound to use. + bool ControlLog = 2 [deprecated = true]; } // InventoryPingResponse returns the result of an inventory ping initiated via an @@ -3207,7 +3308,11 @@ service AuthService { // GetTrustedClusters gets all current Trusted Cluster resources. rpc GetTrustedClusters(google.protobuf.Empty) returns (types.TrustedClusterV2List); // UpsertTrustedCluster upserts a Trusted Cluster in a backend. - rpc UpsertTrustedCluster(types.TrustedClusterV2) returns (types.TrustedClusterV2); + // + // Deprecated: Use [teleport.trust.v1.UpsertTrustedCluster] instead. + rpc UpsertTrustedCluster(types.TrustedClusterV2) returns (types.TrustedClusterV2) { + option deprecated = true; + } // DeleteTrustedCluster deletes an existing Trusted Cluster in a backend by name. rpc DeleteTrustedCluster(types.ResourceRequest) returns (google.protobuf.Empty); @@ -3482,6 +3587,9 @@ service AuthService { // but may result in confusing behavior if it is used outside of those contexts. rpc GetSSHTargets(GetSSHTargetsRequest) returns (GetSSHTargetsResponse); + // ResolveSSHTarget returns the server that would be resolved in an equivalent ssh dial request. + rpc ResolveSSHTarget(ResolveSSHTargetRequest) returns (ResolveSSHTargetResponse); + // GetDomainName returns local auth domain of the current auth server rpc GetDomainName(google.protobuf.Empty) returns (GetDomainNameResponse); // GetClusterCACert returns the PEM-encoded TLS certs for the local cluster diff --git a/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/client/proto/event.proto b/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/client/proto/event.proto index 7c0cd043eb13d..b8c39fec6054e 100644 --- a/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/client/proto/event.proto +++ b/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/client/proto/event.proto @@ -34,6 +34,7 @@ import "teleport/secreports/v1/secreports.proto"; import "teleport/userloginstate/v1/userloginstate.proto"; import "teleport/userprovisioning/v2/statichostuser.proto"; import "teleport/usertasks/v1/user_tasks.proto"; +import "teleport/workloadidentity/v1/resource.proto"; option go_package = "github.com/gravitational/teleport/api/client/proto"; @@ -206,5 +207,9 @@ message Event { // IdentityCenterAccountlAssignment is a resource representing a potential // Permission Set grant on a specific AWS account. teleport.identitycenter.v1.AccountAssignment IdentityCenterAccountAssignment = 74; + // PluginStaticCredentials is filled in PluginStaticCredentials related events + types.PluginStaticCredentialsV1 PluginStaticCredentials = 75; + // WorkloadIdentity is a resource for workload identity. 
+ teleport.workloadidentity.v1.WorkloadIdentity WorkloadIdentity = 76; } } diff --git a/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/events/events.proto b/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/events/events.proto index bd61c99381b62..c82a6e6976e0b 100644 --- a/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/events/events.proto +++ b/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/events/events.proto @@ -1547,6 +1547,33 @@ message AccessRequestCreate { ]; } +// AccessRequestExpire is emitted when access request has expired. +message AccessRequestExpire { + // Metadata is a common event metadata + Metadata Metadata = 1 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // RequestID is access request ID + string RequestID = 3 [(gogoproto.jsontag) = "id"]; + + // ResourceExpiry is the time at which the access request resource will expire. + google.protobuf.Timestamp ResourceExpiry = 4 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "expiry,omitempty" + ]; +} + // ResourceID is a unique identifier for a teleport resource. This is duplicated // from api/types/types.proto to decouple the api and events types and because // neither file currently imports the other. @@ -1617,6 +1644,21 @@ message PortForward { // Addr is a target port forwarding address string Addr = 5 [(gogoproto.jsontag) = "addr"]; + + // KubernetesCluster has information about a kubernetes cluster, if + // applicable. + KubernetesClusterMetadata KubernetesCluster = 6 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // KubernetesPod has information about a kubernetes pod, if applicable. + KubernetesPodMetadata KubernetesPod = 7 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; } // X11Forward is emitted when a user requests X11 protocol forwarding @@ -2675,6 +2717,9 @@ message AppMetadata { ]; // AppName is the configured application name. string AppName = 4 [(gogoproto.jsontag) = "app_name,omitempty"]; + // AppTargetPort signifies that the app is a multi-port TCP app and says which port was used to + // access the app. This field is not set for other types of apps, including single-port TCP apps. + uint32 AppTargetPort = 5 [(gogoproto.jsontag) = "app_target_port,omitempty"]; } // AppCreate is emitted when a new application resource is created. @@ -3105,6 +3150,12 @@ message DatabaseSessionStart { // connection. This can be useful for backend process cancellation or // termination and it is not a sensitive or secret value. uint32 PostgresPID = 8 [(gogoproto.jsontag) = "postgres_pid,omitempty"]; + // Client is the common client event metadata. + ClientMetadata Client = 9 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; } // DatabaseSessionQuery is emitted when a user executes a database query. @@ -4330,6 +4381,8 @@ message IntegrationMetadata { AWSOIDCIntegrationMetadata AWSOIDC = 2 [(gogoproto.jsontag) = "aws_oidc,omitempty"]; // AzureOIDC contains metadata for Azure OIDC integrations. 
AzureOIDCIntegrationMetadata AzureOIDC = 3 [(gogoproto.jsontag) = "azure_oidc,omitempty"]; + // GitHub contains metadata for GitHub integrations. + GitHubIntegrationMetadata GitHub = 4 [(gogoproto.jsontag) = "github,omitempty"]; } // AWSOIDCIntegrationMetadata contains metadata for AWS OIDC integrations. @@ -4351,6 +4404,12 @@ message AzureOIDCIntegrationMetadata { string ClientID = 2 [(gogoproto.jsontag) = "client_id,omitempty"]; } +// GitHubIntegrationMetadata contains metadata for GitHub integrations. +message GitHubIntegrationMetadata { + // Organization specifies the name of the organization for the GitHub integration. + string Organization = 1 [(gogoproto.jsontag) = "organization,omitempty"]; +} + // PluginCreate is emitted when a plugin resource is created. message PluginCreate { // Metadata is a common event metadata. @@ -4676,6 +4735,14 @@ message OneOf { events.UserTaskUpdate UserTaskUpdate = 189; events.UserTaskDelete UserTaskDelete = 190; events.SFTPSummary SFTPSummary = 191; + events.ContactCreate ContactCreate = 192; + events.ContactDelete ContactDelete = 193; + events.WorkloadIdentityCreate WorkloadIdentityCreate = 194; + events.WorkloadIdentityUpdate WorkloadIdentityUpdate = 195; + events.WorkloadIdentityDelete WorkloadIdentityDelete = 196; + events.GitCommand GitCommand = 197; + events.UserLoginAccessListInvalid UserLoginAccessListInvalid = 198; + events.AccessRequestExpire AccessRequestExpire = 199; } } @@ -4829,6 +4896,9 @@ message RouteToApp { string GCPServiceAccount = 7 [(gogoproto.jsontag) = "gcp_service_account,omitempty"]; // URI is the application URI. string URI = 8 [(gogoproto.jsontag) = "uri,omitempty"]; + // TargetPort signifies that the user accessed a specific port in a multi-port TCP app. The value + // must be between 1 and 65535. + uint32 TargetPort = 9 [(gogoproto.jsontag) = "target_port,omitempty"]; } // RouteToDatabase combines parameters for database service routing information. @@ -6697,6 +6767,12 @@ message SPIFFESVIDIssued { // Audiences is the list of audiences in the issued SVID. // Only present if the SVID is a JWT. repeated string Audiences = 11 [(gogoproto.jsontag) = "audiences,omitempty"]; + // The WorkloadIdentity resource that was used to issue the SVID, this will + // be empty if the legacy RPCs were used. + string WorkloadIdentity = 12 [(gogoproto.jsontag) = "workload_identity,omitempty"]; + // The revision of the WorkloadIdentity resource that was used to issue the + // SVID. This will be empty if the legacy RPCs were used. + string WorkloadIdentityRevision = 13 [(gogoproto.jsontag) = "workload_identity_revision,omitempty"]; } // AuthPreferenceUpdate is emitted when the auth preference is updated. @@ -7566,3 +7642,305 @@ message UserTaskDelete { (gogoproto.jsontag) = "" ]; } + +// ContactCreate is emitted when a contact is created. 
+message ContactCreate {
+  // Metadata is a common event metadata
+  Metadata Metadata = 1 [
+    (gogoproto.nullable) = false,
+    (gogoproto.embed) = true,
+    (gogoproto.jsontag) = ""
+  ];
+
+  // ResourceMetadata is a common resource event metadata
+  ResourceMetadata Resource = 2 [
+    (gogoproto.nullable) = false,
+    (gogoproto.embed) = true,
+    (gogoproto.jsontag) = ""
+  ];
+
+  // User is a common user event metadata
+  UserMetadata User = 3 [
+    (gogoproto.nullable) = false,
+    (gogoproto.embed) = true,
+    (gogoproto.jsontag) = ""
+  ];
+
+  // ConnectionMetadata holds information about the connection
+  ConnectionMetadata Connection = 4 [
+    (gogoproto.nullable) = false,
+    (gogoproto.embed) = true,
+    (gogoproto.jsontag) = ""
+  ];
+
+  // Status indicates whether the creation was successful.
+  Status Status = 5 [
+    (gogoproto.nullable) = false,
+    (gogoproto.embed) = true,
+    (gogoproto.jsontag) = ""
+  ];
+
+  // Email is the Email of the contact being created
+  string Email = 6 [(gogoproto.jsontag) = "email"];
+
+  // ContactType is the type of the contact being created ('Business' or 'Security')
+  ContactType ContactType = 7 [(gogoproto.jsontag) = "contact_type"];
+}
+
+// ContactDelete is emitted when a contact is deleted.
+message ContactDelete {
+  // Metadata is a common event metadata
+  Metadata Metadata = 1 [
+    (gogoproto.nullable) = false,
+    (gogoproto.embed) = true,
+    (gogoproto.jsontag) = ""
+  ];
+
+  // ResourceMetadata is a common resource event metadata
+  ResourceMetadata Resource = 2 [
+    (gogoproto.nullable) = false,
+    (gogoproto.embed) = true,
+    (gogoproto.jsontag) = ""
+  ];
+
+  // User is a common user event metadata
+  UserMetadata User = 3 [
+    (gogoproto.nullable) = false,
+    (gogoproto.embed) = true,
+    (gogoproto.jsontag) = ""
+  ];
+
+  // ConnectionMetadata holds information about the connection
+  ConnectionMetadata Connection = 4 [
+    (gogoproto.nullable) = false,
+    (gogoproto.embed) = true,
+    (gogoproto.jsontag) = ""
+  ];
+
+  // Status indicates whether the deletion was successful.
+  Status Status = 5 [
+    (gogoproto.nullable) = false,
+    (gogoproto.embed) = true,
+    (gogoproto.jsontag) = ""
+  ];
+
+  // Email is the Email of the contact being deleted
+  string Email = 6 [(gogoproto.jsontag) = "email"];
+
+  // ContactType is the type of the contact being deleted ('Business' or 'Security')
+  ContactType ContactType = 7 [(gogoproto.jsontag) = "contact_type"];
+}
+
+// ContactType is the type of contact being added.
+enum ContactType {
+  CONTACT_TYPE_UNSPECIFIED = 0;
+  CONTACT_TYPE_BUSINESS = 1;
+  CONTACT_TYPE_SECURITY = 2;
+}
+
+// WorkloadIdentityCreate is emitted when a WorkloadIdentity is created.
+message WorkloadIdentityCreate { + // Metadata is a common event metadata + Metadata Metadata = 1 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // User is a common user event metadata + UserMetadata User = 3 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 4 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // WorkloadIdentityData is a copy of the WorkloadIdentity resource + google.protobuf.Struct WorkloadIdentityData = 5 [ + (gogoproto.jsontag) = "workload_identity_data,omitempty", + (gogoproto.casttype) = "Struct" + ]; +} + +// WorkloadIdentityUpdate is emitted when a WorkloadIdentity is updated. +message WorkloadIdentityUpdate { + // Metadata is a common event metadata + Metadata Metadata = 1 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // User is a common user event metadata + UserMetadata User = 3 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 4 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // WorkloadIdentityData is a copy of the WorkloadIdentity resource + google.protobuf.Struct WorkloadIdentityData = 5 [ + (gogoproto.jsontag) = "workload_identity_data,omitempty", + (gogoproto.casttype) = "Struct" + ]; +} + +// WorkloadIdentityDelete is emitted when a WorkloadIdentity is deleted. +message WorkloadIdentityDelete { + // Metadata is a common event metadata + Metadata Metadata = 1 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // User is a common user event metadata + UserMetadata User = 3 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 4 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; +} + +// GitCommand is emitted when a user performs a Git fetch or push command. 
+message GitCommand { + // Metadata is a common event metadata + Metadata Metadata = 1 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // User is a common user event metadata + UserMetadata User = 2 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 3 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 4 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 5 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // CommandMetadata is a common command metadata + CommandMetadata Command = 6 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // Service is the type of the git request like git-upload-pack or + // git-receive-pack. + string service = 8 [(gogoproto.jsontag) = "service"]; + // Path is the Git repo path, usually /. + string path = 9 [(gogoproto.jsontag) = "path"]; + + // Actions defines details for a Git push. + repeated GitCommandAction actions = 10 [(gogoproto.jsontag) = "actions,omitempty"]; +} + +// GitCommandAction defines details for a Git push. +message GitCommandAction { + // Action type like create or update. + string Action = 1 [(gogoproto.jsontag) = "action,omitempty"]; + // Reference name like ref/main/my_branch. + string Reference = 2 [(gogoproto.jsontag) = "reference,omitempty"]; + // Old is the old hash. + string Old = 3 [(gogoproto.jsontag) = "old,omitempty"]; + // New is the new hash. + string New = 4 [(gogoproto.jsontag) = "new,omitempty"]; +} + +// AccessListInvalidMetadata contains metadata about invalid access lists. +message AccessListInvalidMetadata { + // AccessListName is the name of the invalid access list. + string AccessListName = 1 [(gogoproto.jsontag) = "access_list_name, omitempty"]; + // User is the username of the access list member who attempted to log in. + string User = 2 [(gogoproto.jsontag) = "user,omitempty"]; + // MissingRoles are the names of the non-existent roles being referenced by the access list, causing it to be invalid. + repeated string MissingRoles = 3 [(gogoproto.jsontag) = "missing_roles,omitempty"]; +} + +// UserLoginAccessListInvalid is emitted when a user who is a member of an invalid +// access list logs in. It is used to indicate that the access list could not be +// applied to the user's session. +message UserLoginAccessListInvalid { + // Metadata is common event metadata + Metadata Metadata = 1 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // AccessListInvalidMetadata is the metadata for this access list invalid event. + AccessListInvalidMetadata AccessListInvalidMetadata = 2 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; + + // Status contains fields to indicate whether attempt was successful or not. 
+ Status Status = 3 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "" + ]; +} diff --git a/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/trusted_device_requirement.proto b/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/trusted_device_requirement.proto new file mode 100644 index 0000000000000..9f074c9e76465 --- /dev/null +++ b/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/trusted_device_requirement.proto @@ -0,0 +1,37 @@ +// Copyright 2024 Gravitational, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package types; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/gravitational/teleport/api/types"; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// TrustedDeviceRequirement indicates whether access may be hindered by the lack +// of a trusted device. +enum TrustedDeviceRequirement { + // Device requirement not determined. + // Does not mean that a device is not required, only that the necessary data + // was not considered. + TRUSTED_DEVICE_REQUIREMENT_UNSPECIFIED = 0; + // Trusted device not required. + TRUSTED_DEVICE_REQUIREMENT_NOT_REQUIRED = 1; + // Trusted device required by either cluster mode or user roles. + TRUSTED_DEVICE_REQUIREMENT_REQUIRED = 2; +} diff --git a/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/types.proto b/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/types.proto index 7739ecad6c7a0..f241f6501956e 100644 --- a/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/types.proto +++ b/integrations/operator/crdgen/testdata/protofiles/teleport/legacy/types/types.proto @@ -21,6 +21,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; import "teleport/attestation/v1/attestation.proto"; +import "teleport/legacy/types/trusted_device_requirement.proto"; import "teleport/legacy/types/wrappers/wrappers.proto"; option go_package = "github.com/gravitational/teleport/api/types"; @@ -711,6 +712,31 @@ message InstanceSpecV1 { // ExternalUpgraderVersion identifies the external upgrader version. Empty if no upgrader is defined. string ExternalUpgraderVersion = 8 [(gogoproto.jsontag) = "ext_upgrader_version,omitempty"]; + + // LastMeasurement stores information about the latest measurement between services. + SystemClockMeasurement LastMeasurement = 9; +} + +// SystemClockMeasurement represents the measurement state of the systems clock difference. +message SystemClockMeasurement { + // ControllerSystemClock is the system clock of the inventory controller. 
+ google.protobuf.Timestamp ControllerSystemClock = 1 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "controller_system_clock,omitempty" + ]; + // SystemClock is the system clock of the upstream. + google.protobuf.Timestamp SystemClock = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "system_clock,omitempty" + ]; + // RequestDuration stores information about the request duration between auth and remote service. + google.protobuf.Duration RequestDuration = 3 [ + (gogoproto.jsontag) = "request_duration", + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true + ]; } // InstanceControlLogEntry represents an entry in a given instance's control log. The control log of @@ -842,6 +868,9 @@ message ServerSpecV2 { // CloudMetadata contains info about the cloud instance the server is running // on, if any. CloudMetadata CloudMetadata = 14 [(gogoproto.jsontag) = "cloud_metadata,omitempty"]; + // GitHub contains info about GitHub proxies where each server represents a + // GitHub organization. + GitHubServerMetadata git_hub = 15 [(gogoproto.jsontag) = "github,omitempty"]; reserved 8; reserved 10; @@ -875,6 +904,15 @@ message CloudMetadata { AWSInfo AWS = 1 [(gogoproto.jsontag) = "aws,omitempty"]; } +// GitHubServerMetadata contains info about GitHub proxies where each server +// represents a GitHub organization. +message GitHubServerMetadata { + // Organization specifies the name of the organization for the GitHub integration. + string organization = 1 [(gogoproto.jsontag) = "organization,omitempty"]; + // Integration is the integration that is associated with this Server. + string integration = 2 [(gogoproto.jsontag) = "integration,omitempty"]; +} + // AppServerV3 represents a single proxied web app. message AppServerV3 { option (gogoproto.goproto_stringer) = false; @@ -971,6 +1009,10 @@ message IdentityCenterPermissionSet { // Name is the human-readable name of the Permission Set. string Name = 2 [(gogoproto.jsontag) = "name,omitempty"]; + + // AssignmentID is the ID of the Teelport Account Assignment resource that + // represents this permission being assigned on the enclosing Account. + string AssignmentID = 3 [(gogoproto.jsontag) = "assignment_name,omitempty"]; } // AppIdentityCenter encapsulates information about an AWS Identity Center @@ -1016,6 +1058,11 @@ message AppSpecV3 { // IdentityCenter encasulates AWS identity-center specific information. Only // valid for Identity Center account apps. AppIdentityCenter IdentityCenter = 12 [(gogoproto.jsontag) = "identity_center,omitempty"]; + // TCPPorts is a list of ports and port ranges that an app agent can forward connections to. + // Only applicable to TCP App Access. + // If this field is not empty, URI is expected to contain no port number and start with the tcp + // protocol. + repeated PortRange TCPPorts = 13 [(gogoproto.jsontag) = "tcp_ports,omitempty"]; } // AppServerOrSAMLIdPServiceProviderV1 holds either an AppServerV3 or a SAMLIdPServiceProviderV1 resource (never both). @@ -1057,6 +1104,20 @@ message Header { string Value = 2 [(gogoproto.jsontag) = "value"]; } +// PortRange describes a port range for TCP apps. The range starts with Port and ends with EndPort. +// PortRange can be used to describe a single port in which case the Port field is the port and the +// EndPort field is 0. +message PortRange { + option (gogoproto.goproto_stringer) = false; + option (gogoproto.stringer) = false; + // Port describes the start of the range. 
It must be between 1 and 65535. + uint32 Port = 1 [(gogoproto.jsontag) = "port"]; + // EndPort describes the end of the range, inclusive. If set, it must be between 2 and 65535 and + // be greater than Port when describing a port range. When omitted or set to zero, it signifies + // that the port range defines a single port. + uint32 EndPort = 2 [(gogoproto.jsontag) = "end_port,omitempty"]; +} + // CommandLabelV2 is a label that has a value as a result of the // output generated by running command, e.g. hostname message CommandLabelV2 { @@ -2668,6 +2729,13 @@ message AccessRequestSpecV3 { (gogoproto.nullable) = true, (gogoproto.jsontag) = "assume_start_time,omitempty" ]; + + // ResourceExpiry is the time at which the access request resource will expire. + google.protobuf.Timestamp ResourceExpiry = 22 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "expiry,omitempty" + ]; } enum AccessRequestScope { @@ -2759,6 +2827,7 @@ message RequestKubernetesResource { } // ResourceID is a unique identifier for a teleport resource. +// Must be kept in sync with teleport.decision.v1alpha1.ResourceId. message ResourceID { // ClusterName is the name of the cluster the resource is in. string ClusterName = 1 [(gogoproto.jsontag) = "cluster"]; @@ -3372,6 +3441,24 @@ message RoleConditions { (gogoproto.nullable) = false, (gogoproto.jsontag) = "account_assignments,omitempty" ]; + + // GitHubPermissions defines GitHub integration related permissions. + repeated GitHubPermission git_hub_permissions = 43 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "github_permissions,omitempty" + ]; + + // WorkloadIdentityLabels controls whether or not specific WorkloadIdentity + // resources can be invoked. Further authorization controls exist on the + // WorkloadIdentity resource itself. + wrappers.LabelValues WorkloadIdentityLabels = 44 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "workload_identity_labels,omitempty", + (gogoproto.customtype) = "Labels" + ]; + // WorkloadIdentityLabelsExpression is a predicate expression used to + // allow/deny access to issuing a WorkloadIdentity. + string WorkloadIdentityLabelsExpression = 45 [(gogoproto.jsontag) = "workload_identity_labels_expression,omitempty"]; } // IdentityCenterAccountAssignment captures an AWS Identity Center account @@ -3381,6 +3468,11 @@ message IdentityCenterAccountAssignment { string Account = 2 [(gogoproto.jsontag) = "account,omitempty"]; } +// GitHubPermission defines GitHub integration related permissions. +message GitHubPermission { + repeated string organizations = 1 [(gogoproto.jsontag) = "orgs,omitempty"]; +} + // SPIFFERoleCondition sets out which SPIFFE identities this role is allowed or // denied to generate. The Path matcher is required, and is evaluated first. If, // the Path does not match then the other matcher fields are not evaluated. @@ -3803,6 +3895,10 @@ message ExternalIdentity { // SAMLSingleLogoutURL is the SAML Single log-out URL to initiate SAML SLO (single log-out), if applicable. string SAMLSingleLogoutURL = 3 [(gogoproto.jsontag) = "samlSingleLogoutUrl,omitempty"]; + + // UserID is the ID of the identity. Some connectors like GitHub have an + // unique ID apart from the username. 
+ string UserID = 4 [(gogoproto.jsontag) = "user_id,omitempty"]; } // LoginStatus is a login status of the user @@ -4246,19 +4342,6 @@ message WebSessionSpecV2 { bytes TLSPriv = 15 [(gogoproto.jsontag) = "tls_priv,omitempty"]; } -// TrustedDeviceRequirement indicates whether access may be hindered by the lack -// of a trusted device. -enum TrustedDeviceRequirement { - // Device requirement not determined. - // Does not mean that a device is not required, only that the necessary data - // was not considered. - TRUSTED_DEVICE_REQUIREMENT_UNSPECIFIED = 0; - // Trusted device not required. - TRUSTED_DEVICE_REQUIREMENT_NOT_REQUIRED = 1; - // Trusted device required by either cluster mode or user roles. - TRUSTED_DEVICE_REQUIREMENT_REQUIRED = 2; -} - // Web-focused view of teleport.devicetrust.v1.DeviceWebToken. message DeviceWebToken { // Opaque token identifier. @@ -5193,7 +5276,7 @@ message GithubAuthRequest { string KubernetesCluster = 13 [(gogoproto.jsontag) = "kubernetes_cluster,omitempty"]; // SSOTestFlow indicates if the request is part of the test flow. bool SSOTestFlow = 14 [(gogoproto.jsontag) = "sso_test_flow"]; - // ConnectorSpec is embedded connector spec for use in test flow. + // ConnectorSpec is embedded connector spec for use in test flow or authenticated user flow. GithubConnectorSpecV3 ConnectorSpec = 15 [(gogoproto.jsontag) = "connector_spec,omitempty"]; // AttestationStatement is an attestation statement for the given public key. // @@ -5217,6 +5300,10 @@ message GithubAuthRequest { teleport.attestation.v1.AttestationStatement ssh_attestation_statement = 21 [(gogoproto.jsontag) = "ssh_attestation_statement,omitempty"]; // TlsAttestationStatement is an attestation statement for the given TLS public key. teleport.attestation.v1.AttestationStatement tls_attestation_statement = 22 [(gogoproto.jsontag) = "tls_attestation_statement,omitempty"]; + // AuthenticatedUser is the username of an authenticated Teleport user. This + // OAuth flow is used to retrieve GitHub identity info which will be added to + // the existing user. + string authenticated_user = 23 [(gogoproto.jsontag) = "authenticated_user,omitempty"]; } // SSOWarnings conveys a user-facing main message along with auxiliary warnings. @@ -5387,6 +5474,12 @@ message GithubClaims { // Teams is the users team membership repeated string Teams = 3 [(gogoproto.jsontag) = "teams"]; + + // UserID is a global unique integer that is assigned to each GitHub user. The + // user ID is immutable (unlike the GitHub username) and can be found in APIs + // like get user. + // https://docs.github.com/en/rest/users/users + string UserID = 4 [(gogoproto.jsontag) = "user_id,omitempty"]; } // TeamMapping represents a single team membership mapping. @@ -6384,6 +6477,8 @@ message PluginSpecV1 { PluginEmailSettings email = 17; // Settings for the Microsoft Teams plugin PluginMSTeamsSettings msteams = 18; + // Settings for the OpenTex NetIQ plugin. + PluginNetIQSettings net_iq = 19; } // generation contains a unique ID that should: @@ -6810,6 +6905,18 @@ message PluginMSTeamsSettings { string default_recipient = 5; } +// PluginNetIQSettings defines the settings for a NetIQ integration plugin +message PluginNetIQSettings { + option (gogoproto.equal) = true; + // oauth_issuer_endpoint is the NetIQ Oauth Issuer endpoint. + // Usually, it's equal to https://osp.domain.ext/a/idm/auth/oauth2 + string oauth_issuer_endpoint = 1; + // api_endpoint is the IDM PROV Rest API location. 
+ string api_endpoint = 2; + // insecure_skip_verify controls whether the NetIQ certificate validation should be skipped. + bool insecure_skip_verify = 3; +} + message PluginBootstrapCredentialsV1 { oneof credentials { PluginOAuth2AuthorizationCodeCredentials oauth2_authorization_code = 1; @@ -6848,6 +6955,8 @@ message PluginStatusV1 { PluginOktaStatusV1 okta = 7; // AWSIC holds status details for the AWS Identity Center plugin. PluginAWSICStatusV1 aws_ic = 8; + // NetIQ holds status details for the NetIQ plugin. + PluginNetIQStatusV1 net_iq = 9; } // last_raw_error variable stores the most recent raw error message received from an API or service. @@ -6857,6 +6966,18 @@ message PluginStatusV1 { string last_raw_error = 6; } +// PluginNetIQStatusV1 is the status details for the NetIQ plugin. +message PluginNetIQStatusV1 { + // imported_users is the number of users imported from NetIQ eDirectory. + uint32 imported_users = 1; + // imported_groups is the number of groups imported from NetIQ eDirectory. + uint32 imported_groups = 2; + // imported_roles is the number of roles imported from NetIQ eDirectory. + uint32 imported_roles = 3; + // imported_resources is the number of resources imported from NetIQ eDirectory. + uint32 imported_resources = 4; +} + // PluginGitlabStatusV1 is the status details for the Gitlab plugin. message PluginGitlabStatusV1 { // imported_users is the number of users imported from Gitlab. @@ -7119,6 +7240,7 @@ message PluginStaticCredentialsSpecV1 { string APIToken = 1; PluginStaticCredentialsBasicAuth BasicAuth = 2; PluginStaticCredentialsOAuthClientSecret OAuthClientSecret = 3; + PluginStaticCredentialsSSHCertAuthorities SSHCertAuthorities = 4; } } @@ -7140,6 +7262,14 @@ message PluginStaticCredentialsOAuthClientSecret { string ClientSecret = 2 [(gogoproto.jsontag) = "client_secret"]; } +// PluginStaticCredentialsSSHCertAuthorities contains the active SSH CAs used +// for the integration or plugin. +message PluginStaticCredentialsSSHCertAuthorities { + // CertAuthorities contains the active SSH CAs used for the integration or + // plugin. + repeated SSHKeyPair cert_authorities = 1; +} + // SAMLIdPServiceProviderV1 is the representation of a SAML IdP service provider. message SAMLIdPServiceProviderV1 { option (gogoproto.goproto_stringer) = false; @@ -7487,7 +7617,12 @@ message IntegrationSpecV1 { AWSOIDCIntegrationSpecV1 AWSOIDC = 1 [(gogoproto.jsontag) = "aws_oidc,omitempty"]; // AzureOIDC contains the specific fields to handle the Azure OIDC Integration subkind AzureOIDCIntegrationSpecV1 AzureOIDC = 2 [(gogoproto.jsontag) = "azure_oidc,omitempty"]; + // GitHub contains the specific fields to handle the GitHub integration subkind. + GitHubIntegrationSpecV1 GitHub = 3 [(gogoproto.jsontag) = "github,omitempty"]; } + + // Credentials contains credentials for the integration. + PluginCredentialsV1 credentials = 4; } // AWSOIDCIntegrationSpecV1 contains the spec properties for the AWS OIDC SubKind Integration. @@ -7532,6 +7667,12 @@ message AzureOIDCIntegrationSpecV1 { string ClientID = 2 [(gogoproto.jsontag) = "client_id,omitempty"]; } +// GitHubIntegrationSpecV1 contains the specific fields to handle the GitHub integration subkind. +message GitHubIntegrationSpecV1 { + // Organization specifies the name of the organization for the GitHub integration. + string Organization = 1 [(gogoproto.jsontag) = "organization,omitempty"]; +} + // HeadlessAuthentication holds data for an ongoing headless authentication attempt. 
message HeadlessAuthentication { // Header is the resource header. @@ -7920,12 +8061,14 @@ message OktaOptions { message AccessGraphSync { // AWS is a configuration for AWS Access Graph service poll service. repeated AccessGraphAWSSync AWS = 1 [(gogoproto.jsontag) = "aws,omitempty"]; - // PollInterval is the frequency at which to poll for AWS resources + // PollInterval is the frequency at which to poll for resources google.protobuf.Duration PollInterval = 2 [ (gogoproto.jsontag) = "poll_interval,omitempty", (gogoproto.nullable) = false, (gogoproto.stdduration) = true ]; + // Azure is a configuration for Azure Access Graph service poll service. + repeated AccessGraphAzureSync Azure = 3 [(gogoproto.jsontag) = "azure,omitempty"]; } // AccessGraphAWSSync is a configuration for AWS Access Graph service poll service. @@ -7937,3 +8080,11 @@ message AccessGraphAWSSync { // Integration is the integration name used to generate credentials to interact with AWS APIs. string Integration = 4 [(gogoproto.jsontag) = "integration,omitempty"]; } + +// AccessGraphAzureSync is a configuration for Azure Access Graph service poll service. +message AccessGraphAzureSync { + // SubscriptionID Is the ID of the Azure subscription to sync resources from + string SubscriptionID = 1 [(gogoproto.jsontag) = "subscription_id,omitempty"]; + // Integration is the integration name used to generate credentials to interact with AWS APIs. + string Integration = 2 [(gogoproto.jsontag) = "integration,omitempty"]; +} diff --git a/integrations/operator/hack/fixture-operator-role.yaml b/integrations/operator/hack/fixture-operator-role.yaml index e9925b19a106c..ac6e88a6dfbd1 100644 --- a/integrations/operator/hack/fixture-operator-role.yaml +++ b/integrations/operator/hack/fixture-operator-role.yaml @@ -73,5 +73,13 @@ spec: - read - update - delete + - resources: + - trusted_cluster + verbs: + - list + - create + - read + - update + - delete deny: {} version: v7 diff --git a/lib/auth/trustedcluster.go b/lib/auth/trustedcluster.go index a02e8f4b74de6..c6a11a6d5e5db 100644 --- a/lib/auth/trustedcluster.go +++ b/lib/auth/trustedcluster.go @@ -84,7 +84,6 @@ func (a *Server) UpdateTrustedCluster(ctx context.Context, tc types.TrustedClust if err != nil { return nil, trace.Wrap(err) } - updated, err := a.updateTrustedCluster(ctx, tc, existingCluster) return updated, trace.Wrap(err) } From b6e2badbe969786d69d6c119a4472cbf4005cb61 Mon Sep 17 00:00:00 2001 From: Brian Joerger Date: Mon, 13 Jan 2025 09:57:54 -0800 Subject: [PATCH 08/15] Fix Per-session MFA for desktops (#50793) * Add sendChallengeResponse implementation for desktop sessions. * Rename useMfaTty to useMfaEmitter. 
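
Illustrative sketch (assumed wiring, not part of this diff): once the MFA ceremony
started by useMfaEmitter(tdpClient) resolves, the response is expected to be sent back
to the Windows Desktop Service over the TDP websocket via the new client method added
below. onMfaResolved is a hypothetical helper used only for illustration; the real call
sites live in useDesktopSession.tsx and useMfa.ts in the diff that follows.

    import Client from 'teleport/lib/tdp/client';
    import { MfaChallengeResponse } from 'teleport/services/mfa';

    // Forward a completed MFA challenge response to the desktop service.
    // The client encodes it as an MFA JSON message on the TDP connection.
    function onMfaResolved(client: Client, response: MfaChallengeResponse) {
      client.sendChallengeResponse(response);
    }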
--- .../teleport/src/Console/DocumentDb/DocumentDb.tsx | 4 ++-- .../src/Console/DocumentKubeExec/DocumentKubeExec.tsx | 4 ++-- .../teleport/src/Console/DocumentSsh/DocumentSsh.tsx | 4 ++-- .../teleport/src/DesktopSession/useDesktopSession.tsx | 4 ++-- web/packages/teleport/src/lib/tdp/client.ts | 9 +++++++++ web/packages/teleport/src/lib/useMfa.ts | 2 +- 6 files changed, 18 insertions(+), 9 deletions(-) diff --git a/web/packages/teleport/src/Console/DocumentDb/DocumentDb.tsx b/web/packages/teleport/src/Console/DocumentDb/DocumentDb.tsx index e17bed66fe6b2..780f03e1d788f 100644 --- a/web/packages/teleport/src/Console/DocumentDb/DocumentDb.tsx +++ b/web/packages/teleport/src/Console/DocumentDb/DocumentDb.tsx @@ -24,7 +24,7 @@ import AuthnDialog from 'teleport/components/AuthnDialog'; import Document from 'teleport/Console/Document'; import { Terminal, TerminalRef } from 'teleport/Console/DocumentSsh/Terminal'; import * as stores from 'teleport/Console/stores/types'; -import { useMfaTty } from 'teleport/lib/useMfa'; +import { useMfaEmitter } from 'teleport/lib/useMfa'; import { ConnectDialog } from './ConnectDialog'; import { useDbSession } from './useDbSession'; @@ -37,7 +37,7 @@ type Props = { export function DocumentDb({ doc, visible }: Props) { const terminalRef = useRef(); const { tty, status, closeDocument, sendDbConnectData } = useDbSession(doc); - const mfa = useMfaTty(tty); + const mfa = useMfaEmitter(tty); useEffect(() => { // when switching tabs or closing tabs, focus on visible terminal terminalRef.current?.focus(); diff --git a/web/packages/teleport/src/Console/DocumentKubeExec/DocumentKubeExec.tsx b/web/packages/teleport/src/Console/DocumentKubeExec/DocumentKubeExec.tsx index 5a250c9d4b3f1..1d382b40dc91c 100644 --- a/web/packages/teleport/src/Console/DocumentKubeExec/DocumentKubeExec.tsx +++ b/web/packages/teleport/src/Console/DocumentKubeExec/DocumentKubeExec.tsx @@ -25,7 +25,7 @@ import Document from 'teleport/Console/Document'; import useKubeExecSession from 'teleport/Console/DocumentKubeExec/useKubeExecSession'; import { Terminal, TerminalRef } from 'teleport/Console/DocumentSsh/Terminal'; import * as stores from 'teleport/Console/stores/types'; -import { useMfaTty } from 'teleport/lib/useMfa'; +import { useMfaEmitter } from 'teleport/lib/useMfa'; import KubeExecData from './KubeExecDataDialog'; @@ -38,7 +38,7 @@ export default function DocumentKubeExec({ doc, visible }: Props) { const terminalRef = useRef(); const { tty, status, closeDocument, sendKubeExecData } = useKubeExecSession(doc); - const mfa = useMfaTty(tty); + const mfa = useMfaEmitter(tty); useEffect(() => { // when switching tabs or closing tabs, focus on visible terminal terminalRef.current?.focus(); diff --git a/web/packages/teleport/src/Console/DocumentSsh/DocumentSsh.tsx b/web/packages/teleport/src/Console/DocumentSsh/DocumentSsh.tsx index 6cf952ccfc292..b7a2b93534f84 100644 --- a/web/packages/teleport/src/Console/DocumentSsh/DocumentSsh.tsx +++ b/web/packages/teleport/src/Console/DocumentSsh/DocumentSsh.tsx @@ -30,7 +30,7 @@ import { TerminalSearch } from 'shared/components/TerminalSearch'; import AuthnDialog from 'teleport/components/AuthnDialog'; import * as stores from 'teleport/Console/stores'; -import { useMfa, useMfaTty } from 'teleport/lib/useMfa'; +import { useMfa, useMfaEmitter } from 'teleport/lib/useMfa'; import { MfaChallengeScope } from 'teleport/services/auth/auth'; import { useConsoleContext } from '../consoleContextProvider'; @@ -54,7 +54,7 @@ function DocumentSsh({ doc, visible }: 
PropTypes) { const { tty, status, closeDocument, session } = useSshSession(doc); const [showSearch, setShowSearch] = useState(false); - const ttyMfa = useMfaTty(tty); + const ttyMfa = useMfaEmitter(tty); const ftMfa = useMfa({ isMfaRequired: ttyMfa.required, req: { diff --git a/web/packages/teleport/src/DesktopSession/useDesktopSession.tsx b/web/packages/teleport/src/DesktopSession/useDesktopSession.tsx index 75367eeae955e..e5b1446b09b4a 100644 --- a/web/packages/teleport/src/DesktopSession/useDesktopSession.tsx +++ b/web/packages/teleport/src/DesktopSession/useDesktopSession.tsx @@ -24,7 +24,7 @@ import useAttempt from 'shared/hooks/useAttemptNext'; import type { UrlDesktopParams } from 'teleport/config'; import { ButtonState } from 'teleport/lib/tdp'; -import { useMfaTty } from 'teleport/lib/useMfa'; +import { useMfaEmitter } from 'teleport/lib/useMfa'; import desktopService from 'teleport/services/desktops'; import userService from 'teleport/services/user'; @@ -129,7 +129,7 @@ export default function useDesktopSession() { }); const tdpClient = clientCanvasProps.tdpClient; - const mfa = useMfaTty(tdpClient); + const mfa = useMfaEmitter(tdpClient); const onShareDirectory = () => { try { diff --git a/web/packages/teleport/src/lib/tdp/client.ts b/web/packages/teleport/src/lib/tdp/client.ts index 5434a504631cd..83250b8bddbc6 100644 --- a/web/packages/teleport/src/lib/tdp/client.ts +++ b/web/packages/teleport/src/lib/tdp/client.ts @@ -25,6 +25,7 @@ import init, { import { AuthenticatedWebSocket } from 'teleport/lib/AuthenticatedWebSocket'; import { EventEmitterMfaSender } from 'teleport/lib/EventEmitterMfaSender'; import { TermEvent, WebsocketCloseCode } from 'teleport/lib/term/enums'; +import { MfaChallengeResponse } from 'teleport/services/mfa'; import Codec, { FileType, @@ -619,6 +620,14 @@ export default class Client extends EventEmitterMfaSender { this.send(this.codec.encodeClipboardData(clipboardData)); } + sendChallengeResponse(data: MfaChallengeResponse) { + const msg = this.codec.encodeMfaJson({ + mfaType: 'n', + jsonString: JSON.stringify(data), + }); + this.send(msg); + } + addSharedDirectory(sharedDirectory: FileSystemDirectoryHandle) { try { this.sdManager.add(sharedDirectory); diff --git a/web/packages/teleport/src/lib/useMfa.ts b/web/packages/teleport/src/lib/useMfa.ts index 54d1299c65648..4d014f10e23ba 100644 --- a/web/packages/teleport/src/lib/useMfa.ts +++ b/web/packages/teleport/src/lib/useMfa.ts @@ -172,7 +172,7 @@ export function useMfa({ req, isMfaRequired }: MfaProps): MfaState { }; } -export function useMfaTty(emitterSender: EventEmitterMfaSender): MfaState { +export function useMfaEmitter(emitterSender: EventEmitterMfaSender): MfaState { const [mfaRequired, setMfaRequired] = useState(false); const mfa = useMfa({ isMfaRequired: mfaRequired }); From 47f4498b76c049b7a3ad250bc3df4fc297bd017d Mon Sep 17 00:00:00 2001 From: Matt Brock Date: Mon, 13 Jan 2025 13:27:58 -0600 Subject: [PATCH 09/15] Adding the Azure sync module functions along with new cloud client functionality (#50366) * Protobuf and configuration for Access Graph Azure Discovery * Adding the Azure sync module functions along with new cloud client functionality * Forgot to decouple role definitions fetching function from the fetcher * Moving reconciliation to the upstream azure sync PR * Moving reconciliation test to the upstream azure sync PR * Updating go.sum * Fixing rebase after protobuf gen * Nolinting until upstream PRs * Updating to use existing msgraph client * Adding protection around nil values * PR 
feedback * Updating principal fetching to incorporate metadata from principal subtypes * Updating opts to not leak URL parameters * Conformant package name * Using variadic options * PR feedback * Removing memberOf expansion * Expanding memberships by calling memberOf on each user * Also returning expanded principals for improved readability * Removing ptrToList * PR feedback * Rebase go.sum stuff * Go mod tidy * Linting * Linting * Collecting errors from fetching memberships and using a WithContext error group * Fixing go.mod * Update lib/msgraph/paginated.go Co-authored-by: Tiago Silva * PR feedback * e ref update * e ref update * Fixing method * Fetching group members from groups rather than memberships of each principal * Linting --------- Co-authored-by: Tiago Silva --- go.mod | 1 + go.sum | 2 + integrations/event-handler/go.mod | 1 + integrations/event-handler/go.sum | 2 + integrations/terraform/go.mod | 1 + integrations/terraform/go.sum | 2 + lib/cloud/azure/roleassignments.go | 57 ++++++++++++ lib/cloud/azure/roledefinitions.go | 57 ++++++++++++ lib/cloud/clients.go | 28 +++++- lib/msgraph/paginated.go | 8 ++ .../fetchers/azure-sync/memberships.go | 65 ++++++++++++++ .../fetchers/azure-sync/principals.go | 87 +++++++++++++++++++ .../fetchers/azure-sync/roleassignments.go | 68 +++++++++++++++ .../fetchers/azure-sync/roledefinitions.go | 78 +++++++++++++++++ .../fetchers/azure-sync/virtualmachines.go | 61 +++++++++++++ 15 files changed, 517 insertions(+), 1 deletion(-) create mode 100644 lib/cloud/azure/roleassignments.go create mode 100644 lib/cloud/azure/roledefinitions.go create mode 100644 lib/srv/discovery/fetchers/azure-sync/memberships.go create mode 100644 lib/srv/discovery/fetchers/azure-sync/principals.go create mode 100644 lib/srv/discovery/fetchers/azure-sync/roleassignments.go create mode 100644 lib/srv/discovery/fetchers/azure-sync/roledefinitions.go create mode 100644 lib/srv/discovery/fetchers/azure-sync/virtualmachines.go diff --git a/go.mod b/go.mod index 625a780eb3ff6..c5594219a47bc 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( connectrpc.com/connect v1.18.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.2.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v6 v6.3.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0 diff --git a/go.sum b/go.sum index 5bf38ba7fc0c4..af8ff9ce4acc7 100644 --- a/go.sum +++ b/go.sum @@ -668,6 +668,8 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLC github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0 h1:Hp+EScFOu9HeCbeW8WU2yQPJd4gGwhMgKxWe+G6jNzw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0/go.mod h1:/pz8dyNQe+Ey3yBp/XuYz7oqX8YDNWVpPB0hH3XWfbc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.2.0 
h1:JAebRMoc3vL+Nd97GBprHYHucO4+wlW+tNbBIumqJlk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.2.0/go.mod h1:zflC9v4VfViJrSvcvplqws/yGXVbUEMZi/iHpZdSPWA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5 v5.0.0 h1:5n7dPVqsWfVKw+ZiEKSd3Kzu7gwBkbEBkeXb8rgaE9Q= diff --git a/integrations/event-handler/go.mod b/integrations/event-handler/go.mod index 19d919b359e39..2a4ec93e2f6ac 100644 --- a/integrations/event-handler/go.mod +++ b/integrations/event-handler/go.mod @@ -37,6 +37,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v6 v6.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0 // indirect diff --git a/integrations/event-handler/go.sum b/integrations/event-handler/go.sum index 1f0435df0d184..fbd5df9b4923f 100644 --- a/integrations/event-handler/go.sum +++ b/integrations/event-handler/go.sum @@ -631,6 +631,8 @@ github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvUL github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0 h1:Hp+EScFOu9HeCbeW8WU2yQPJd4gGwhMgKxWe+G6jNzw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0/go.mod h1:/pz8dyNQe+Ey3yBp/XuYz7oqX8YDNWVpPB0hH3XWfbc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.2.0 h1:JAebRMoc3vL+Nd97GBprHYHucO4+wlW+tNbBIumqJlk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.2.0/go.mod h1:zflC9v4VfViJrSvcvplqws/yGXVbUEMZi/iHpZdSPWA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5 v5.0.0 h1:5n7dPVqsWfVKw+ZiEKSd3Kzu7gwBkbEBkeXb8rgaE9Q= diff --git a/integrations/terraform/go.mod b/integrations/terraform/go.mod index 5222dc914a105..3f0a69be92443 100644 --- a/integrations/terraform/go.mod +++ b/integrations/terraform/go.mod @@ -43,6 +43,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v6 v6.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0 // indirect diff --git a/integrations/terraform/go.sum b/integrations/terraform/go.sum index da4bca430e263..6c0be667fb1a2 100644 --- 
a/integrations/terraform/go.sum +++ b/integrations/terraform/go.sum @@ -644,6 +644,8 @@ github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvUL github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0 h1:Hp+EScFOu9HeCbeW8WU2yQPJd4gGwhMgKxWe+G6jNzw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0/go.mod h1:/pz8dyNQe+Ey3yBp/XuYz7oqX8YDNWVpPB0hH3XWfbc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.2.0 h1:JAebRMoc3vL+Nd97GBprHYHucO4+wlW+tNbBIumqJlk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.2.0/go.mod h1:zflC9v4VfViJrSvcvplqws/yGXVbUEMZi/iHpZdSPWA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5 v5.0.0 h1:5n7dPVqsWfVKw+ZiEKSd3Kzu7gwBkbEBkeXb8rgaE9Q= diff --git a/lib/cloud/azure/roleassignments.go b/lib/cloud/azure/roleassignments.go new file mode 100644 index 0000000000000..114bceef88b96 --- /dev/null +++ b/lib/cloud/azure/roleassignments.go @@ -0,0 +1,57 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +package azure + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2" + "github.com/gravitational/trace" +) + +// RoleAssignmentsClient wraps the Azure API to provide a high level subset of functionality +type RoleAssignmentsClient struct { + cli *armauthorization.RoleAssignmentsClient +} + +// NewRoleAssignmentsClient creates a new client for a given subscription and credentials +func NewRoleAssignmentsClient(subscription string, cred azcore.TokenCredential, options *arm.ClientOptions) (*RoleAssignmentsClient, error) { + clientFactory, err := armauthorization.NewClientFactory(subscription, cred, options) + if err != nil { + return nil, trace.Wrap(err) + } + roleDefCli := clientFactory.NewRoleAssignmentsClient() + return &RoleAssignmentsClient{cli: roleDefCli}, nil +} + +// ListRoleAssignments returns role assignments for a given scope +func (c *RoleAssignmentsClient) ListRoleAssignments(ctx context.Context, scope string) ([]*armauthorization.RoleAssignment, error) { + pager := c.cli.NewListForScopePager(scope, nil) + var roleDefs []*armauthorization.RoleAssignment + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, trace.Wrap(err) + } + roleDefs = append(roleDefs, page.Value...) + } + return roleDefs, nil +} diff --git a/lib/cloud/azure/roledefinitions.go b/lib/cloud/azure/roledefinitions.go new file mode 100644 index 0000000000000..cdc46196aa530 --- /dev/null +++ b/lib/cloud/azure/roledefinitions.go @@ -0,0 +1,57 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +package azure + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2" + "github.com/gravitational/trace" +) + +// RoleDefinitionsClient wraps the Azure API to provide a high level subset of functionality +type RoleDefinitionsClient struct { + cli *armauthorization.RoleDefinitionsClient +} + +// NewRoleDefinitionsClient creates a new client for a given subscription and credentials +func NewRoleDefinitionsClient(subscription string, cred azcore.TokenCredential, options *arm.ClientOptions) (*RoleDefinitionsClient, error) { + clientFactory, err := armauthorization.NewClientFactory(subscription, cred, options) + if err != nil { + return nil, trace.Wrap(err) + } + roleDefCli := clientFactory.NewRoleDefinitionsClient() + return &RoleDefinitionsClient{cli: roleDefCli}, nil +} + +// ListRoleDefinitions returns role definitions for a given scope +func (c *RoleDefinitionsClient) ListRoleDefinitions(ctx context.Context, scope string) ([]*armauthorization.RoleDefinition, error) { + pager := c.cli.NewListPager(scope, nil) + var roleDefs []*armauthorization.RoleDefinition + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, trace.Wrap(err) + } + roleDefs = append(roleDefs, page.Value...) + } + return roleDefs, nil +} diff --git a/lib/cloud/clients.go b/lib/cloud/clients.go index cc50c98c1ba4f..638658e761e48 100644 --- a/lib/cloud/clients.go +++ b/lib/cloud/clients.go @@ -344,6 +344,10 @@ type azureClients struct { azurePostgresFlexServersClients azure.ClientMap[azure.PostgresFlexServersClient] // azureRunCommandClients contains the cached Azure Run Command clients. azureRunCommandClients azure.ClientMap[azure.RunCommandClient] + // azureRoleDefinitionsClients contains the cached Azure Role Definitions clients. + azureRoleDefinitionsClients azure.ClientMap[azure.RoleDefinitionsClient] + // azureRoleAssignmentsClients contains the cached Azure Role Assignments clients. + azureRoleAssignmentsClients azure.ClientMap[azure.RoleAssignmentsClient] } // credentialsSource defines where the credentials must come from. @@ -717,6 +721,16 @@ func (c *cloudClients) GetAzureRunCommandClient(subscription string) (azure.RunC return c.azureRunCommandClients.Get(subscription, c.GetAzureCredential) } +// GetAzureRoleDefinitionsClient returns an Azure Role Definitions client +func (c *cloudClients) GetAzureRoleDefinitionsClient(subscription string) (azure.RoleDefinitionsClient, error) { + return c.azureRoleDefinitionsClients.Get(subscription, c.GetAzureCredential) +} + +// GetAzureRoleAssignmentsClient returns an Azure Role Assignments client +func (c *cloudClients) GetAzureRoleAssignmentsClient(subscription string) (azure.RoleAssignmentsClient, error) { + return c.azureRoleAssignmentsClients.Get(subscription, c.GetAzureCredential) +} + // Close closes all initialized clients. 
func (c *cloudClients) Close() (err error) { c.mtx.Lock() @@ -1021,6 +1035,8 @@ type TestCloudClients struct { AzureMySQLFlex azure.MySQLFlexServersClient AzurePostgresFlex azure.PostgresFlexServersClient AzureRunCommand azure.RunCommandClient + AzureRoleDefinitions azure.RoleDefinitionsClient + AzureRoleAssignments azure.RoleAssignmentsClient } // GetAWSSession returns AWS session for the specified region, optionally @@ -1244,11 +1260,21 @@ func (c *TestCloudClients) GetAzurePostgresFlexServersClient(subscription string return c.AzurePostgresFlex, nil } -// GetAzureRunCommand returns an Azure Run Command client for the given subscription. +// GetAzureRunCommandClient returns an Azure Run Command client for the given subscription. func (c *TestCloudClients) GetAzureRunCommandClient(subscription string) (azure.RunCommandClient, error) { return c.AzureRunCommand, nil } +// GetAzureRoleDefinitionsClient returns an Azure Role Definitions client for the given subscription. +func (c *TestCloudClients) GetAzureRoleDefinitionsClient(subscription string) (azure.RoleDefinitionsClient, error) { + return c.AzureRoleDefinitions, nil +} + +// GetAzureRoleAssignmentsClient returns an Azure Role Assignments client for the given subscription. +func (c *TestCloudClients) GetAzureRoleAssignmentsClient(subscription string) (azure.RoleAssignmentsClient, error) { + return c.AzureRoleAssignments, nil +} + // Close closes all initialized clients. func (c *TestCloudClients) Close() error { return nil diff --git a/lib/msgraph/paginated.go b/lib/msgraph/paginated.go index 51c587f19d074..a0b9488af9d70 100644 --- a/lib/msgraph/paginated.go +++ b/lib/msgraph/paginated.go @@ -101,6 +101,14 @@ func (c *Client) IterateUsers(ctx context.Context, f func(*User) bool) error { return iterateSimple(c, ctx, "users", f) } +// IterateServicePrincipals lists all service principals in the Entra ID directory using pagination. +// `f` will be called for each object in the result set. +// if `f` returns `false`, the iteration is stopped (equivalent to `break` in a normal loop). +// Ref: [https://learn.microsoft.com/en-us/graph/api/serviceprincipal-list]. +func (c *Client) IterateServicePrincipals(ctx context.Context, f func(principal *ServicePrincipal) bool) error { + return iterateSimple(c, ctx, "servicePrincipals", f) +} + // IterateGroupMembers lists all members for the given Entra ID group using pagination. // `f` will be called for each object in the result set. // if `f` returns `false`, the iteration is stopped (equivalent to `break` in a normal loop). diff --git a/lib/srv/discovery/fetchers/azure-sync/memberships.go b/lib/srv/discovery/fetchers/azure-sync/memberships.go new file mode 100644 index 0000000000000..f05be8f72567c --- /dev/null +++ b/lib/srv/discovery/fetchers/azure-sync/memberships.go @@ -0,0 +1,65 @@ +/* + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +package azuresync + +import ( + "context" + + "github.com/gravitational/trace" + "golang.org/x/sync/errgroup" + + accessgraphv1alpha "github.com/gravitational/teleport/gen/proto/go/accessgraph/v1alpha" + "github.com/gravitational/teleport/lib/msgraph" +) + +const parallelism = 10 //nolint:unused // invoked in a dependent PR + +// expandMemberships adds membership data to AzurePrincipal objects by querying the Graph API for group memberships +func expandMemberships(ctx context.Context, cli *msgraph.Client, principals []*accessgraphv1alpha.AzurePrincipal) ([]*accessgraphv1alpha.AzurePrincipal, error) { //nolint:unused // invoked in a dependent PR + // Map principals by ID + var principalsMap = make(map[string]*accessgraphv1alpha.AzurePrincipal) + for _, principal := range principals { + principalsMap[principal.Id] = principal + } + // Iterate through the Azure groups and add the group ID as a membership for its corresponding principal + eg, _ := errgroup.WithContext(ctx) + eg.SetLimit(parallelism) + errCh := make(chan error, len(principals)) + for _, principal := range principals { + if principal.ObjectType != "group" { + continue + } + group := principal + eg.Go(func() error { + err := cli.IterateGroupMembers(ctx, group.Id, func(member msgraph.GroupMember) bool { + if memberPrincipal, ok := principalsMap[*member.GetID()]; ok { + memberPrincipal.MemberOf = append(memberPrincipal.MemberOf, group.Id) + } + return true + }) + if err != nil { + errCh <- err + } + return nil + }) + } + _ = eg.Wait() + close(errCh) + return principals, trace.NewAggregateFromChannel(errCh, ctx) +} diff --git a/lib/srv/discovery/fetchers/azure-sync/principals.go b/lib/srv/discovery/fetchers/azure-sync/principals.go new file mode 100644 index 0000000000000..073d6c4713e0c --- /dev/null +++ b/lib/srv/discovery/fetchers/azure-sync/principals.go @@ -0,0 +1,87 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +package azuresync + +import ( + "context" + + "github.com/gravitational/trace" + "google.golang.org/protobuf/types/known/timestamppb" + + accessgraphv1alpha "github.com/gravitational/teleport/gen/proto/go/accessgraph/v1alpha" + "github.com/gravitational/teleport/lib/msgraph" +) + +type dirObjMetadata struct { //nolint:unused // invoked in a dependent PR + objectType string +} + +type queryResult struct { //nolint:unused // invoked in a dependent PR + metadata dirObjMetadata + dirObj msgraph.DirectoryObject +} + +// fetchPrincipals fetches the Azure principals (users, groups, and service principals) using the Graph API +func fetchPrincipals(ctx context.Context, subscriptionID string, cli *msgraph.Client) ([]*accessgraphv1alpha.AzurePrincipal, error) { //nolint: unused // invoked in a dependent PR + // Fetch the users, groups, and service principals as directory objects + var queryResults []queryResult + err := cli.IterateUsers(ctx, func(user *msgraph.User) bool { + res := queryResult{metadata: dirObjMetadata{objectType: "user"}, dirObj: user.DirectoryObject} + queryResults = append(queryResults, res) + return true + }) + if err != nil { + return nil, trace.Wrap(err) + } + err = cli.IterateGroups(ctx, func(group *msgraph.Group) bool { + res := queryResult{metadata: dirObjMetadata{objectType: "group"}, dirObj: group.DirectoryObject} + queryResults = append(queryResults, res) + return true + }) + if err != nil { + return nil, trace.Wrap(err) + } + err = cli.IterateServicePrincipals(ctx, func(servicePrincipal *msgraph.ServicePrincipal) bool { + res := queryResult{metadata: dirObjMetadata{objectType: "servicePrincipal"}, dirObj: servicePrincipal.DirectoryObject} + queryResults = append(queryResults, res) + return true + }) + if err != nil { + return nil, trace.Wrap(err) + } + + // Return the users, groups, and service principals as protobuf messages + var fetchErrs []error + var pbPrincipals []*accessgraphv1alpha.AzurePrincipal + for _, res := range queryResults { + if res.dirObj.ID == nil || res.dirObj.DisplayName == nil { + fetchErrs = append(fetchErrs, + trace.BadParameter("nil values on msgraph directory object: %v", res.dirObj)) + continue + } + pbPrincipals = append(pbPrincipals, &accessgraphv1alpha.AzurePrincipal{ + Id: *res.dirObj.ID, + SubscriptionId: subscriptionID, + LastSyncTime: timestamppb.Now(), + DisplayName: *res.dirObj.DisplayName, + ObjectType: res.metadata.objectType, + }) + } + return pbPrincipals, trace.NewAggregate(fetchErrs...) +} diff --git a/lib/srv/discovery/fetchers/azure-sync/roleassignments.go b/lib/srv/discovery/fetchers/azure-sync/roleassignments.go new file mode 100644 index 0000000000000..a97fe69727ef8 --- /dev/null +++ b/lib/srv/discovery/fetchers/azure-sync/roleassignments.go @@ -0,0 +1,68 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */
+
+package azuresync
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2"
+	"github.com/gravitational/trace"
+	"google.golang.org/protobuf/types/known/timestamppb"
+
+	accessgraphv1alpha "github.com/gravitational/teleport/gen/proto/go/accessgraph/v1alpha"
+)
+
+// RoleAssignmentsClient specifies the methods used to fetch role assignments from Azure
+type RoleAssignmentsClient interface {
+	ListRoleAssignments(ctx context.Context, scope string) ([]*armauthorization.RoleAssignment, error)
+}
+
+// fetchRoleAssignments fetches Azure role assignments using the Azure role assignments API
+func fetchRoleAssignments(ctx context.Context, subscriptionID string, cli RoleAssignmentsClient) ([]*accessgraphv1alpha.AzureRoleAssignment, error) { //nolint:unused // invoked in a dependent PR
+	// List the role assignments
+	roleAssigns, err := cli.ListRoleAssignments(ctx, fmt.Sprintf("/subscriptions/%s", subscriptionID))
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+
+	// Convert to protobuf format
+	pbRoleAssigns := make([]*accessgraphv1alpha.AzureRoleAssignment, 0, len(roleAssigns))
+	var fetchErrs []error
+	for _, roleAssign := range roleAssigns {
+		if roleAssign.ID == nil ||
+			roleAssign.Properties == nil ||
+			roleAssign.Properties.PrincipalID == nil ||
+			roleAssign.Properties.Scope == nil {
+			fetchErrs = append(fetchErrs,
+				trace.BadParameter("nil values on AzureRoleAssignment object: %v", roleAssign))
+			continue
+		}
+		pbRoleAssign := &accessgraphv1alpha.AzureRoleAssignment{
+			Id:               *roleAssign.ID,
+			SubscriptionId:   subscriptionID,
+			LastSyncTime:     timestamppb.Now(),
+			PrincipalId:      *roleAssign.Properties.PrincipalID,
+			RoleDefinitionId: *roleAssign.Properties.RoleDefinitionID,
+			Scope:            *roleAssign.Properties.Scope,
+		}
+		pbRoleAssigns = append(pbRoleAssigns, pbRoleAssign)
+	}
+	return pbRoleAssigns, trace.NewAggregate(fetchErrs...)
+}
diff --git a/lib/srv/discovery/fetchers/azure-sync/roledefinitions.go b/lib/srv/discovery/fetchers/azure-sync/roledefinitions.go
new file mode 100644
index 0000000000000..485117f898b81
--- /dev/null
+++ b/lib/srv/discovery/fetchers/azure-sync/roledefinitions.go
@@ -0,0 +1,78 @@
+/*
+ * Teleport
+ * Copyright (C) 2024 Gravitational, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */ + +package azuresync + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2" + "github.com/gravitational/trace" + "google.golang.org/protobuf/types/known/timestamppb" + + accessgraphv1alpha "github.com/gravitational/teleport/gen/proto/go/accessgraph/v1alpha" + "github.com/gravitational/teleport/lib/utils/slices" +) + +// RoleDefinitionsClient specifies the methods used to fetch roles from Azure +type RoleDefinitionsClient interface { + ListRoleDefinitions(ctx context.Context, scope string) ([]*armauthorization.RoleDefinition, error) +} + +func fetchRoleDefinitions(ctx context.Context, subscriptionID string, cli RoleDefinitionsClient) ([]*accessgraphv1alpha.AzureRoleDefinition, error) { //nolint:unused // used in a dependent PR + // List the role definitions + roleDefs, err := cli.ListRoleDefinitions(ctx, fmt.Sprintf("/subscriptions/%s", subscriptionID)) + if err != nil { + return nil, trace.Wrap(err) + } + + // Convert to protobuf format + pbRoleDefs := make([]*accessgraphv1alpha.AzureRoleDefinition, 0, len(roleDefs)) + var fetchErrs []error + for _, roleDef := range roleDefs { + if roleDef.ID == nil || + roleDef.Properties == nil || + roleDef.Properties.Permissions == nil || + roleDef.Properties.RoleName == nil { + fetchErrs = append(fetchErrs, trace.BadParameter("nil values on AzureRoleDefinition object: %v", roleDef)) + continue + } + pbPerms := make([]*accessgraphv1alpha.AzureRBACPermission, 0, len(roleDef.Properties.Permissions)) + for _, perm := range roleDef.Properties.Permissions { + if perm.Actions == nil && perm.NotActions == nil { + fetchErrs = append(fetchErrs, trace.BadParameter("nil values on Permission object: %v", perm)) + continue + } + pbPerm := accessgraphv1alpha.AzureRBACPermission{ + Actions: slices.FromPointers(perm.Actions), + NotActions: slices.FromPointers(perm.NotActions), + } + pbPerms = append(pbPerms, &pbPerm) + } + pbRoleDef := &accessgraphv1alpha.AzureRoleDefinition{ + Id: *roleDef.ID, + Name: *roleDef.Properties.RoleName, + SubscriptionId: subscriptionID, + LastSyncTime: timestamppb.Now(), + Permissions: pbPerms, + } + pbRoleDefs = append(pbRoleDefs, pbRoleDef) + } + return pbRoleDefs, trace.NewAggregate(fetchErrs...) +} diff --git a/lib/srv/discovery/fetchers/azure-sync/virtualmachines.go b/lib/srv/discovery/fetchers/azure-sync/virtualmachines.go new file mode 100644 index 0000000000000..cf0d068db7b0c --- /dev/null +++ b/lib/srv/discovery/fetchers/azure-sync/virtualmachines.go @@ -0,0 +1,61 @@ +/* + * Teleport + * Copyright (C) 2024 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */
+
+package azuresync
+
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6"
+	"github.com/gravitational/trace"
+	"google.golang.org/protobuf/types/known/timestamppb"
+
+	accessgraphv1alpha "github.com/gravitational/teleport/gen/proto/go/accessgraph/v1alpha"
+)
+
+const allResourceGroups = "*" //nolint:unused // invoked in a dependent PR
+
+// VirtualMachinesClient specifies the methods used to fetch virtual machines from Azure
+type VirtualMachinesClient interface {
+	ListVirtualMachines(ctx context.Context, resourceGroup string) ([]*armcompute.VirtualMachine, error)
+}
+
+func fetchVirtualMachines(ctx context.Context, subscriptionID string, cli VirtualMachinesClient) ([]*accessgraphv1alpha.AzureVirtualMachine, error) { //nolint:unused // invoked in a dependent PR
+	vms, err := cli.ListVirtualMachines(ctx, allResourceGroups)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+
+	// Return the VMs as protobuf messages
+	pbVms := make([]*accessgraphv1alpha.AzureVirtualMachine, 0, len(vms))
+	var fetchErrs []error
+	for _, vm := range vms {
+		if vm.ID == nil || vm.Name == nil {
+			fetchErrs = append(fetchErrs, trace.BadParameter("nil values on AzureVirtualMachine object: %v", vm))
+			continue
+		}
+		pbVm := accessgraphv1alpha.AzureVirtualMachine{
+			Id:             *vm.ID,
+			SubscriptionId: subscriptionID,
+			LastSyncTime:   timestamppb.Now(),
+			Name:           *vm.Name,
+		}
+		pbVms = append(pbVms, &pbVm)
+	}
+	return pbVms, trace.NewAggregate(fetchErrs...)
+}

From 62ad3fe6c7bdfa23fe770e4f3c842d4252f49485 Mon Sep 17 00:00:00 2001
From: Marco Dinis
Date: Mon, 13 Jan 2025 19:57:38 +0000
Subject: [PATCH 10/15] Fix EKS Discover User Task reporting (#50989)

* Fix EKS Discover User Task reporting

The `clusterNames` slice and the `clustersByName` key set must be the same.

When there were two groups of EKS Clusters, one with App Discovery enabled
and another one with it disabled, we had different sets of clusters being
processed.

`clusterNames` had all the EKS Clusters, while `clustersByName` only had the
EKS Clusters for one of the processing groups (either AppDiscovery=on or
AppDiscovery=off).

This meant that when the `EnrollEKSClusters` call returned an error, we looked
up the map, but it might be the case that that particular EKS Cluster was not
configured for the current processing group.

So, the lookup `clustersByName[r.EksClusterName]` returned a nil value, which
resulted in a panic.
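To illustrate the failure mode, here is a minimal, self-contained Go sketch
(placeholder types, not the actual Teleport ones): looking up a missing key in
a map of interface values yields a nil interface, and calling a method on that
nil value panics at runtime, while the comma-ok idiom used by the fix lets the
caller skip such entries:

```go
package main

import "fmt"

// cluster stands in for the discovered-cluster interface used by the real code.
type cluster interface{ Name() string }

type eksCluster struct{ name string }

func (c *eksCluster) Name() string { return c.name }

func main() {
	clustersByName := map[string]cluster{
		"cluster01": &eksCluster{name: "cluster01"},
	}

	// Unguarded lookup: a missing key returns the zero value (a nil interface).
	// Calling a method on it would panic with a nil pointer dereference:
	//   c := clustersByName["cluster02"]
	//   c.Name() // panic: runtime error: invalid memory address or nil pointer dereference

	// Guarded lookup: the comma-ok idiom skips results that were not part of
	// the current processing group instead of panicking.
	if c, ok := clustersByName["cluster02"]; ok {
		fmt.Println(c.Name())
	} else {
		fmt.Println("skipping result for unknown cluster cluster02")
	}
}
```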
* add test * check if cluster exists in local map --- lib/srv/discovery/discovery_test.go | 108 +++++++++++++++++- lib/srv/discovery/kube_integration_watcher.go | 10 +- 2 files changed, 113 insertions(+), 5 deletions(-) diff --git a/lib/srv/discovery/discovery_test.go b/lib/srv/discovery/discovery_test.go index 2948e10cdb916..5e9d3d1acf7e6 100644 --- a/lib/srv/discovery/discovery_test.go +++ b/lib/srv/discovery/discovery_test.go @@ -322,6 +322,31 @@ func TestDiscoveryServer(t *testing.T) { ) require.NoError(t, err) + discoveryConfigWithAndWithoutAppDiscoveryTestName := uuid.NewString() + discoveryConfigWithAndWithoutAppDiscovery, err := discoveryconfig.NewDiscoveryConfig( + header.Metadata{Name: discoveryConfigWithAndWithoutAppDiscoveryTestName}, + discoveryconfig.Spec{ + DiscoveryGroup: defaultDiscoveryGroup, + AWS: []types.AWSMatcher{ + { + Types: []string{"eks"}, + Regions: []string{"eu-west-2"}, + Tags: map[string]utils.Strings{"EnableAppDiscovery": {"No"}}, + Integration: "my-integration", + KubeAppDiscovery: false, + }, + { + Types: []string{"eks"}, + Regions: []string{"eu-west-2"}, + Tags: map[string]utils.Strings{"EnableAppDiscovery": {"Yes"}}, + Integration: "my-integration", + KubeAppDiscovery: true, + }, + }, + }, + ) + require.NoError(t, err) + tcs := []struct { name string // presentInstances is a list of servers already present in teleport. @@ -754,6 +779,74 @@ func TestDiscoveryServer(t *testing.T) { require.Equal(t, defaultDiscoveryGroup, taskCluster.DiscoveryGroup) }, }, + { + name: "multiple EKS clusters with different KubeAppDiscovery setting failed to autoenroll and user tasks are created", + presentInstances: []types.Server{}, + foundEC2Instances: []ec2types.Instance{}, + ssm: &mockSSMClient{}, + eksClusters: []*ekstypes.Cluster{ + { + Name: aws.String("cluster01"), + Arn: aws.String("arn:aws:eks:us-west-2:123456789012:cluster/cluster01"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "EnableAppDiscovery": "Yes", + }, + }, + { + Name: aws.String("cluster02"), + Arn: aws.String("arn:aws:eks:us-west-2:123456789012:cluster/cluster02"), + Status: ekstypes.ClusterStatusActive, + Tags: map[string]string{ + "EnableAppDiscovery": "No", + }, + }, + }, + eksEnroller: &mockEKSClusterEnroller{ + resp: &integrationpb.EnrollEKSClustersResponse{ + Results: []*integrationpb.EnrollEKSClusterResult{ + { + EksClusterName: "cluster01", + Error: "access endpoint is not reachable", + IssueType: "eks-cluster-unreachable", + }, + { + EksClusterName: "cluster02", + Error: "access endpoint is not reachable", + IssueType: "eks-cluster-unreachable", + }, + }, + }, + err: nil, + }, + emitter: &mockEmitter{}, + staticMatchers: Matchers{}, + discoveryConfig: discoveryConfigWithAndWithoutAppDiscovery, + wantInstalledInstances: []string{}, + userTasksDiscoverCheck: func(t require.TestingT, i1 interface{}, i2 ...interface{}) { + existingTasks, ok := i1.([]*usertasksv1.UserTask) + require.True(t, ok, "failed to get existing tasks: %T", i1) + require.Len(t, existingTasks, 2) + existingTask := existingTasks[0] + if existingTask.Spec.DiscoverEks.AppAutoDiscover == false { + existingTask = existingTasks[1] + } + + require.Equal(t, "OPEN", existingTask.GetSpec().State) + require.Equal(t, "my-integration", existingTask.GetSpec().Integration) + require.Equal(t, "eks-cluster-unreachable", existingTask.GetSpec().IssueType) + require.Equal(t, "123456789012", existingTask.GetSpec().GetDiscoverEks().GetAccountId()) + require.Equal(t, "us-west-2", 
existingTask.GetSpec().GetDiscoverEks().GetRegion()) + + taskClusters := existingTask.GetSpec().GetDiscoverEks().Clusters + require.Contains(t, taskClusters, "cluster01") + taskCluster := taskClusters["cluster01"] + + require.Equal(t, "cluster01", taskCluster.Name) + require.Equal(t, discoveryConfigWithAndWithoutAppDiscoveryTestName, taskCluster.DiscoveryConfig) + require.Equal(t, defaultDiscoveryGroup, taskCluster.DiscoveryGroup) + }, + }, } for _, tc := range tcs { @@ -3528,8 +3621,19 @@ type mockEKSClusterEnroller struct { err error } -func (m *mockEKSClusterEnroller) EnrollEKSClusters(context.Context, *integrationpb.EnrollEKSClustersRequest, ...grpc.CallOption) (*integrationpb.EnrollEKSClustersResponse, error) { - return m.resp, m.err +func (m *mockEKSClusterEnroller) EnrollEKSClusters(ctx context.Context, req *integrationpb.EnrollEKSClustersRequest, opt ...grpc.CallOption) (*integrationpb.EnrollEKSClustersResponse, error) { + ret := &integrationpb.EnrollEKSClustersResponse{ + Results: []*integrationpb.EnrollEKSClusterResult{}, + } + // Filter out non-requested clusters. + for _, clusterName := range req.EksClusterNames { + for _, mockClusterResult := range m.resp.Results { + if clusterName == mockClusterResult.EksClusterName { + ret.Results = append(ret.Results, mockClusterResult) + } + } + } + return ret, m.err } type combinedDiscoveryClient struct { diff --git a/lib/srv/discovery/kube_integration_watcher.go b/lib/srv/discovery/kube_integration_watcher.go index ffbecf6497359..88d89f258f8c4 100644 --- a/lib/srv/discovery/kube_integration_watcher.go +++ b/lib/srv/discovery/kube_integration_watcher.go @@ -21,6 +21,7 @@ package discovery import ( "context" "fmt" + "maps" "slices" "strings" "sync" @@ -243,14 +244,13 @@ func (s *Server) enrollEKSClusters(region, integration, discoveryConfigName stri } ctx, cancel := context.WithTimeout(s.ctx, time.Duration(len(clusters))*30*time.Second) defer cancel() - var clusterNames []string for _, kubeAppDiscovery := range []bool{true, false} { clustersByName := make(map[string]types.DiscoveredEKSCluster) for _, c := range batchedClusters[kubeAppDiscovery] { - clusterNames = append(clusterNames, c.GetAWSConfig().Name) clustersByName[c.GetAWSConfig().Name] = c } + clusterNames := slices.Collect(maps.Keys(clustersByName)) if len(clusterNames) == 0 { continue } @@ -283,7 +283,11 @@ func (s *Server) enrollEKSClusters(region, integration, discoveryConfigName stri s.Log.DebugContext(ctx, "EKS cluster already has installed kube agent", "cluster_name", r.EksClusterName) } - cluster := clustersByName[r.EksClusterName] + cluster, ok := clustersByName[r.EksClusterName] + if !ok { + s.Log.WarnContext(ctx, "Received an EnrollEKSCluster result for a cluster which was not part of the requested clusters", "cluster_name", r.EksClusterName, "clusters_install_request", clusterNames) + continue + } s.awsEKSTasks.addFailedEnrollment( awsEKSTaskKey{ integration: integration, From ce30037005c6cf04e22894fd9fca130f5375332f Mon Sep 17 00:00:00 2001 From: Brian Joerger Date: Mon, 13 Jan 2025 12:13:12 -0800 Subject: [PATCH 11/15] Add SSO MFA docs (#50533) * Add SSO MFA docs. * Address comments from zmb3. * Fix links; minor style fix. * Address comments. * Try removing leading / in example links. * Address Nic's comments. 
---
 .../admin-guides/access-controls/sso/sso.mdx  | 112 +++++++++++++++++-
 examples/resources/oidc-connector-mfa.yaml    |  33 ++++++
 examples/resources/saml-connector-mfa.yaml    |  29 +++++
 3 files changed, 173 insertions(+), 1 deletion(-)
 create mode 100644 examples/resources/oidc-connector-mfa.yaml
 create mode 100644 examples/resources/saml-connector-mfa.yaml

diff --git a/docs/pages/admin-guides/access-controls/sso/sso.mdx b/docs/pages/admin-guides/access-controls/sso/sso.mdx
index 26c0003ea9128..76cd88c08b182 100644
--- a/docs/pages/admin-guides/access-controls/sso/sso.mdx
+++ b/docs/pages/admin-guides/access-controls/sso/sso.mdx
@@ -213,7 +213,7 @@ spec:
     - '2001:db8::/96'
 ```
 
-## Configuring SSO
+## Configuring SSO for login
 
 Teleport works with SSO providers by relying on the concept of an
 **authentication connector**. An authentication connector is a configuration
@@ -411,6 +411,116 @@ values to match your identity provider:
 At this time, the `spec.provider` field should not be set for any other
 identity providers.
 
+## Configuring SSO for MFA checks
+
+Teleport administrators can configure Teleport to delegate MFA checks to an
+SSO provider as an alternative to registering MFA devices directly with the Teleport cluster.
+This allows Teleport users to use MFA devices and custom flows configured in the SSO provider
+to carry out privileged actions in Teleport, such as:
+
+- [Per-session MFA](../guides/per-session-mfa.mdx)
+- [Moderated sessions](../guides/moderated-sessions.mdx)
+- [Admin actions](../guides/mfa-for-admin-actions.mdx)
+
+Administrators may want to consider enabling this feature in order to:
+
+- Make all authentication (login and MFA) go through the IDP, reducing administrative overhead
+- Create custom MFA flows, such as prompting for 2 distinct devices for a single MFA check
+- Integrate with non-webauthn devices supported directly by your IDP
+
+
+ SSO MFA is an enterprise feature. Only OIDC and SAML auth connectors are supported.
+
+
+### Configure the IDP App / Client
+
+Unlike SAML/OIDC login, there is no standardized MFA flow, so each IDP may
+offer zero, one, or more ways to perform MFA checks.
+
+Generally, these offerings will fall under one of the following cases:
+
+1. Use a separate IDP app for MFA:
+
+You can create a separate IDP app with a custom MFA flow. For example, with
+Auth0 (OIDC), you can create a separate app with a custom [Auth0 Action](https://auth0.com/docs/customize/actions)
+which prompts for MFA for an active OIDC session.
+
+2. Use the same IDP app for MFA:
+
+Some IDPs provide a way to fork to different flows using the same IDP app.
+For example, with Okta (OIDC), you can provide `acr_values: ["phr"]` to
+[enforce phishing resistant authentication](https://developer.okta.com/docs/guides/step-up-authentication/main/#predefined-parameter-values).
+
+For a simpler approach, you could use the same IDP app for both login and MFA
+with no adjustments. For Teleport MFA checks, the user will be required to
+log in again through the IDP with username, password, and MFA if required.
+
+
+ While the customizability of SSO MFA presents multiple secure options previously
+ unavailable to administrators, it also presents the possibility of insecure
+ misconfigurations. Therefore, we strongly advise administrators to incorporate
+ strict, phishing-resistant checks with WebAuthn, Device Trust, or similar
+ security features into their custom SSO MFA flow.
+
+
+### Updating your authentication connector to enable MFA checks
+
+Take the authentication connector file `connector.yaml` created in [Configuring SSO for login](#configuring-sso-for-login)
+and add MFA settings.
+
+
+
+
+```yaml
+(!examples/resources/oidc-connector-mfa.yaml!)
+```
+
+
+
+
+```yaml
+(!examples/resources/saml-connector-mfa.yaml!)
+```
+
+You may use `entity_descriptor_url` in lieu of `entity_descriptor` to fetch
+the entity descriptor from your IDP.
+
+We recommend "pinning" the entity descriptor by including the XML rather than
+fetching from a URL.
+
+
+
+
+Update the connector:
+
+```code
+$ tctl create -f connector.yaml
+```
+
+### Allowing SSO as an MFA method in your cluster
+
+Before you can use the SSO MFA flow we created above, you need to enable SSO
+as a second factor in your cluster settings. Modify the dynamic config resource
+using the following command:
+
+```code
+$ tctl edit cluster_auth_preference
+```
+
+Make the following change:
+
+```diff
+kind: cluster_auth_preference
+version: v2
+metadata:
+  name: cluster-auth-preference
+spec:
+  # ...
+  second_factors:
+  - webauthn
++ - sso
+```
+
 ## Working with an external email identity
 
 Along with sending groups, an SSO provider will also provide a user's email address.
diff --git a/examples/resources/oidc-connector-mfa.yaml b/examples/resources/oidc-connector-mfa.yaml
new file mode 100644
index 0000000000000..ca56b727d1487
--- /dev/null
+++ b/examples/resources/oidc-connector-mfa.yaml
@@ -0,0 +1,33 @@
+kind: oidc
+version: v3
+metadata:
+  name: oidc_connector
+spec:
+  # Login settings
+  client_id:
+  client_secret:
+  # issuer_url and redirect_url are shared by both login and MFA, meaning the same OIDC provider must be used.
+  issuer_url: https://idp.example.com/
+  redirect_url: https://mytenant.teleport.sh:443/v1/webapi/oidc/callback
+  # ...
+
+  # MFA settings
+  mfa:
+    # Enabled specifies whether this OIDC connector supports MFA checks.
+    enabled: true
+    # client_id and client_secret should point to an IdP app configured to
+    # handle MFA checks. In most cases, these values should be different from
+    # your login client ID and secret above.
+    client_id:
+    client_secret:
+    # prompt can be set to request a specific prompt flow from the IdP. Supported
+    # values depend on the IdP.
+    prompt: none
+    # acr_values are Authentication Context Class Reference values. These values
+    # are context-specific and vary depending on the IdP.
+    acr_values: []
+    # max_age is the amount of time in seconds that an IdP session is valid for.
+    # Defaults to 0 to always force re-authentication for MFA checks. This should
+    # only be set to a non-zero value if the IdP is set up to perform MFA checks on
+    # top of active user sessions.
+    max_age: 0
diff --git a/examples/resources/saml-connector-mfa.yaml b/examples/resources/saml-connector-mfa.yaml
new file mode 100644
index 0000000000000..9c58802ec0ace
--- /dev/null
+++ b/examples/resources/saml-connector-mfa.yaml
@@ -0,0 +1,29 @@
+#
+# Example resource for a SAML connector
+# This connector can be used for SAML endpoints like Okta
+#
+kind: saml
+version: v2
+metadata:
+  # the name of the connector
+  name: okta
+spec:
+  # Login settings
+  display: Okta
+  entity_descriptor_url: https://example.okta.com/app//sso/saml/metadata
+  # acs is shared by both login and MFA, meaning the same SAML provider must be used.
+  acs: https:///v1/webapi/saml/acs/new_saml_connector
+  # ...
+
+  # MFA settings
+  mfa:
+    # Enabled specifies whether this SAML connector supports MFA checks.
+ enabled: true + # entity_descriptor_url should point to an IdP configured app that handles MFA checks. + # In most cases, this value should be different from the entity_descriptor_url above. + entity_descriptor_url: https://example.okta.com/app//sso/saml/metadata + # force_reauth determines whether existing login sessions are accepted or if + # re-authentication is always required. Defaults to "yes". This should only be + # set to false if the app described above is setup to perform MFA checks on top + # of active user sessions. + force_reauth: yes \ No newline at end of file From 4034d7c3cdccef8fd2c16f082776fecfbe5811e0 Mon Sep 17 00:00:00 2001 From: Brian Joerger Date: Mon, 13 Jan 2025 15:04:06 -0800 Subject: [PATCH 12/15] Fix data race in x11 forwarding test. (#50997) --- lib/srv/regular/sshserver_test.go | 34 ++++++++++++++++++------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/lib/srv/regular/sshserver_test.go b/lib/srv/regular/sshserver_test.go index 5b095961c3c68..eb7d3283b4508 100644 --- a/lib/srv/regular/sshserver_test.go +++ b/lib/srv/regular/sshserver_test.go @@ -1349,24 +1349,30 @@ func x11EchoSession(ctx context.Context, t *testing.T, clt *tracessh.Client) x11 os.Remove(tmpFile.Name()) }) - // type 'printenv DISPLAY > /path/to/tmp/file' into the session (dumping the value of DISPLAY into the temp file) - _, err = keyboard.Write([]byte(fmt.Sprintf("printenv %v >> %s\n\r", x11.DisplayEnv, tmpFile.Name()))) - require.NoError(t, err) + // Reading the display may fail if the session is not fully initialized + // and the write to stdin is swallowed. + display := make(chan string, 1) + require.EventuallyWithT(t, func(t *assert.CollectT) { + // enter 'printenv DISPLAY > /path/to/tmp/file' into the session (dumping the value of DISPLAY into the temp file) + _, err = keyboard.Write([]byte(fmt.Sprintf("printenv %v > %s\n\r", x11.DisplayEnv, tmpFile.Name()))) + assert.NoError(t, err) - // wait for the output - var display string - require.Eventually(t, func() bool { - output, err := os.ReadFile(tmpFile.Name()) - if err == nil && len(output) != 0 { - display = strings.TrimSpace(string(output)) - return true - } - return false - }, 10*time.Second, 100*time.Millisecond, "failed to read display") + assert.Eventually(t, func() bool { + output, err := os.ReadFile(tmpFile.Name()) + if err == nil && len(output) != 0 { + select { + case display <- strings.TrimSpace(string(output)): + default: + } + return true + } + return false + }, time.Second, 100*time.Millisecond, "failed to read display") + }, 10*time.Second, 1*time.Second) // Make a new connection to the XServer proxy, the client // XServer should echo back anything written on it. - serverDisplay, err := x11.ParseDisplay(display) + serverDisplay, err := x11.ParseDisplay(<-display) require.NoError(t, err) return serverDisplay From 0352610c94e3f51435073e042bb7992c82f485aa Mon Sep 17 00:00:00 2001 From: Nic Klaassen Date: Mon, 13 Jan 2025 17:11:33 -0800 Subject: [PATCH 13/15] [vnet] add windows tsh cli commands (#50935) * [vnet] add windows tsh cli commands This PR refactors the VNet tsh CLI commands and adds stubs for the commands that are going to be added for Windows VNet support. 
* fix linux build * update copyright year * use context.AfterFunc * fix typo --- tool/tsh/common/tsh.go | 27 ++++--- tool/tsh/common/vnet.go | 92 ++++++++++++++++++++++ tool/tsh/common/vnet_daemon_darwin.go | 4 +- tool/tsh/common/vnet_darwin.go | 50 ++++-------- tool/tsh/common/vnet_nodaemon.go | 15 +--- tool/tsh/common/vnet_other.go | 37 ++++----- tool/tsh/common/vnet_windows.go | 106 ++++++++++---------------- 7 files changed, 188 insertions(+), 143 deletions(-) create mode 100644 tool/tsh/common/vnet.go diff --git a/tool/tsh/common/tsh.go b/tool/tsh/common/tsh.go index 7677a6a842251..f9d4a038a9ae6 100644 --- a/tool/tsh/common/tsh.go +++ b/tool/tsh/common/tsh.go @@ -1258,9 +1258,12 @@ func Run(ctx context.Context, args []string, opts ...CliOption) error { workloadIdentityCmd := newSVIDCommands(app) - vnetCmd := newVnetCommand(app) - vnetAdminSetupCmd := newVnetAdminSetupCommand(app) - vnetDaemonCmd := newVnetDaemonCommand(app) + vnetCommand := newVnetCommand(app) + vnetAdminSetupCommand := newVnetAdminSetupCommand(app) + vnetDaemonCommand := newVnetDaemonCommand(app) + vnetInstallServiceCommand := newVnetInstallServiceCommand(app) + vnetUninstallServiceCommand := newVnetUninstallServiceCommand(app) + vnetServiceCommand := newVnetServiceCommand(app) gitCmd := newGitCommands(app) @@ -1638,12 +1641,18 @@ func Run(ctx context.Context, args []string, opts ...CliOption) error { err = onHeadlessApprove(&cf) case workloadIdentityCmd.issue.FullCommand(): err = workloadIdentityCmd.issue.run(&cf) - case vnetCmd.FullCommand(): - err = vnetCmd.run(&cf) - case vnetAdminSetupCmd.FullCommand(): - err = vnetAdminSetupCmd.run(&cf) - case vnetDaemonCmd.FullCommand(): - err = vnetDaemonCmd.run(&cf) + case vnetCommand.FullCommand(): + err = vnetCommand.run(&cf) + case vnetAdminSetupCommand.FullCommand(): + err = vnetAdminSetupCommand.run(&cf) + case vnetDaemonCommand.FullCommand(): + err = vnetDaemonCommand.run(&cf) + case vnetInstallServiceCommand.FullCommand(): + err = vnetInstallServiceCommand.run(&cf) + case vnetUninstallServiceCommand.FullCommand(): + err = vnetUninstallServiceCommand.run(&cf) + case vnetServiceCommand.FullCommand(): + err = vnetServiceCommand.run(&cf) case gitCmd.list.FullCommand(): err = gitCmd.list.run(&cf) case gitCmd.login.FullCommand(): diff --git a/tool/tsh/common/vnet.go b/tool/tsh/common/vnet.go new file mode 100644 index 0000000000000..8bcd80a57590f --- /dev/null +++ b/tool/tsh/common/vnet.go @@ -0,0 +1,92 @@ +// Teleport +// Copyright (C) 2025 Gravitational, Inc. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package common + +import ( + "context" + "fmt" + + "github.com/alecthomas/kingpin/v2" + "github.com/gravitational/trace" + + "github.com/gravitational/teleport/lib/vnet" +) + +type vnetCLICommand interface { + // FullCommand matches the signature of kingpin.CmdClause.FullCommand, which + // most commands should embed. 
+ FullCommand() string + // run should be called iff FullCommand() matches the CLI parameters. + run(cf *CLIConf) error +} + +// vnetCommand implements the `tsh vnet` command to run VNet. +type vnetCommand struct { + *kingpin.CmdClause +} + +func newVnetCommand(app *kingpin.Application) *vnetCommand { + cmd := &vnetCommand{ + CmdClause: app.Command("vnet", "Start Teleport VNet, a virtual network for TCP application access."), + } + return cmd +} + +func (c *vnetCommand) run(cf *CLIConf) error { + appProvider, err := newVnetAppProvider(cf) + if err != nil { + return trace.Wrap(err) + } + processManager, err := vnet.Run(cf.Context, &vnet.RunConfig{AppProvider: appProvider}) + if err != nil { + return trace.Wrap(err) + } + fmt.Println("VNet is ready.") + context.AfterFunc(cf.Context, processManager.Close) + return trace.Wrap(processManager.Wait()) +} + +func newVnetAdminSetupCommand(app *kingpin.Application) vnetCLICommand { + return newPlatformVnetAdminSetupCommand(app) +} + +func newVnetDaemonCommand(app *kingpin.Application) vnetCLICommand { + return newPlatformVnetDaemonCommand(app) +} + +func newVnetInstallServiceCommand(app *kingpin.Application) vnetCLICommand { + return newPlatformVnetInstallServiceCommand(app) +} + +func newVnetUninstallServiceCommand(app *kingpin.Application) vnetCLICommand { + return newPlatformVnetUninstallServiceCommand(app) +} + +func newVnetServiceCommand(app *kingpin.Application) vnetCLICommand { + return newPlatformVnetServiceCommand(app) +} + +// vnetCommandNotSupported implements vnetCLICommand, it is returned when a specific +// command is not implemented for a certain platform or environment. +type vnetCommandNotSupported struct{} + +func (vnetCommandNotSupported) FullCommand() string { + return "" +} +func (vnetCommandNotSupported) run(*CLIConf) error { + panic("vnetCommandNotSupported.run should never be called, this is a bug") +} diff --git a/tool/tsh/common/vnet_daemon_darwin.go b/tool/tsh/common/vnet_daemon_darwin.go index 4154f400774bb..958248097487b 100644 --- a/tool/tsh/common/vnet_daemon_darwin.go +++ b/tool/tsh/common/vnet_daemon_darwin.go @@ -34,6 +34,8 @@ const ( vnetDaemonSubCommand = "vnet-daemon" ) +// vnetDaemonCommand implements the vnet-daemon subcommand to run the VNet MacOS +// daemon. type vnetDaemonCommand struct { *kingpin.CmdClause // Launch daemons added through SMAppService are launched from a static .plist file, hence @@ -41,7 +43,7 @@ type vnetDaemonCommand struct { // Instead, the daemon expects the arguments to be sent over XPC from an unprivileged process. 
} -func newVnetDaemonCommand(app *kingpin.Application) *vnetDaemonCommand { +func newPlatformVnetDaemonCommand(app *kingpin.Application) *vnetDaemonCommand { return &vnetDaemonCommand{ CmdClause: app.Command(vnetDaemonSubCommand, "Start the VNet daemon").Hidden(), } diff --git a/tool/tsh/common/vnet_darwin.go b/tool/tsh/common/vnet_darwin.go index 213a971f092b7..20c1f1b55d141 100644 --- a/tool/tsh/common/vnet_darwin.go +++ b/tool/tsh/common/vnet_darwin.go @@ -17,7 +17,6 @@ package common import ( - "fmt" "os" "github.com/alecthomas/kingpin/v2" @@ -29,38 +28,6 @@ import ( "github.com/gravitational/teleport/lib/vnet/daemon" ) -type vnetCommand struct { - *kingpin.CmdClause -} - -func newVnetCommand(app *kingpin.Application) *vnetCommand { - cmd := &vnetCommand{ - CmdClause: app.Command("vnet", "Start Teleport VNet, a virtual network for TCP application access."), - } - return cmd -} - -func (c *vnetCommand) run(cf *CLIConf) error { - appProvider, err := newVnetAppProvider(cf) - if err != nil { - return trace.Wrap(err) - } - - processManager, err := vnet.Run(cf.Context, &vnet.RunConfig{AppProvider: appProvider}) - if err != nil { - return trace.Wrap(err) - } - - go func() { - <-cf.Context.Done() - processManager.Close() - }() - - fmt.Println("VNet is ready.") - - return trace.Wrap(processManager.Wait()) -} - // vnetAdminSetupCommand is the fallback command ran as root when tsh wasn't compiled with the // vnetdaemon build tag. This is typically the case when running tsh in development where it's not // signed and bundled in tsh.app. @@ -83,7 +50,7 @@ type vnetAdminSetupCommand struct { euid int } -func newVnetAdminSetupCommand(app *kingpin.Application) *vnetAdminSetupCommand { +func newPlatformVnetAdminSetupCommand(app *kingpin.Application) *vnetAdminSetupCommand { cmd := &vnetAdminSetupCommand{ CmdClause: app.Command(teleport.VnetAdminSetupSubCommand, "Start the VNet admin subprocess.").Hidden(), } @@ -116,3 +83,18 @@ func (c *vnetAdminSetupCommand) run(cf *CLIConf) error { return trace.Wrap(vnet.RunAdminProcess(cf.Context, config)) } + +// the vnet-install-service command is only supported on windows. +func newPlatformVnetInstallServiceCommand(app *kingpin.Application) vnetCommandNotSupported { + return vnetCommandNotSupported{} +} + +// the vnet-uninstall-service command is only supported on windows. +func newPlatformVnetUninstallServiceCommand(app *kingpin.Application) vnetCommandNotSupported { + return vnetCommandNotSupported{} +} + +// the vnet-service command is only supported on windows. +func newPlatformVnetServiceCommand(app *kingpin.Application) vnetCommandNotSupported { + return vnetCommandNotSupported{} +} diff --git a/tool/tsh/common/vnet_nodaemon.go b/tool/tsh/common/vnet_nodaemon.go index 2e6d516e214f8..d9142729d9f65 100644 --- a/tool/tsh/common/vnet_nodaemon.go +++ b/tool/tsh/common/vnet_nodaemon.go @@ -21,18 +21,9 @@ package common import ( "github.com/alecthomas/kingpin/v2" - "github.com/gravitational/trace" ) -func newVnetDaemonCommand(app *kingpin.Application) vnetDaemonNotSupported { - return vnetDaemonNotSupported{} -} - -type vnetDaemonNotSupported struct{} - -func (vnetDaemonNotSupported) FullCommand() string { - return "" -} -func (vnetDaemonNotSupported) run(*CLIConf) error { - return trace.NotImplemented("tsh was built without support for VNet daemon") +// The vnet-daemon command is only supported with the vnetdaemon tag on darwin. 
+func newPlatformVnetDaemonCommand(app *kingpin.Application) vnetCommandNotSupported { + return vnetCommandNotSupported{} } diff --git a/tool/tsh/common/vnet_other.go b/tool/tsh/common/vnet_other.go index dc705ee824567..86e0ee764725b 100644 --- a/tool/tsh/common/vnet_other.go +++ b/tool/tsh/common/vnet_other.go @@ -1,6 +1,3 @@ -//go:build !darwin && !windows -// +build !darwin,!windows - // Teleport // Copyright (C) 2024 Gravitational, Inc. // @@ -17,34 +14,30 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . +//go:build !darwin && !windows +// +build !darwin,!windows + package common import ( "github.com/alecthomas/kingpin/v2" - "github.com/gravitational/trace" - - "github.com/gravitational/teleport/lib/vnet" ) -func newVnetCommand(app *kingpin.Application) vnetNotSupported { - return vnetNotSupported{} -} +// Satisfy unused linter. +var _ = newVnetAppProvider -func newVnetAdminSetupCommand(app *kingpin.Application) vnetNotSupported { - return vnetNotSupported{} +func newPlatformVnetAdminSetupCommand(app *kingpin.Application) vnetCLICommand { + return vnetCommandNotSupported{} } -type vnetNotSupported struct{} - -func (vnetNotSupported) FullCommand() string { - return "" +func newPlatformVnetInstallServiceCommand(app *kingpin.Application) vnetCLICommand { + return vnetCommandNotSupported{} } -func (vnetNotSupported) run(*CLIConf) error { - return trace.Wrap(vnet.ErrVnetNotImplemented) + +func newPlatformVnetUninstallServiceCommand(app *kingpin.Application) vnetCLICommand { + return vnetCommandNotSupported{} } -var ( - // Satisfy unused linter. - _ = (*vnetAppProvider)(nil) - _ = newVnetAppProvider -) +func newPlatformVnetServiceCommand(app *kingpin.Application) vnetCLICommand { + return vnetCommandNotSupported{} +} diff --git a/tool/tsh/common/vnet_windows.go b/tool/tsh/common/vnet_windows.go index 59d90972f2971..67aa8722fd2dd 100644 --- a/tool/tsh/common/vnet_windows.go +++ b/tool/tsh/common/vnet_windows.go @@ -17,95 +17,71 @@ package common import ( - "fmt" - "os" - "github.com/alecthomas/kingpin/v2" "github.com/gravitational/trace" - - "github.com/gravitational/teleport" - "github.com/gravitational/teleport/api/types" - "github.com/gravitational/teleport/lib/vnet" - "github.com/gravitational/teleport/lib/vnet/daemon" + "golang.org/x/sys/windows/svc" ) -type vnetCommand struct { +var windowsServiceNotImplemented = &trace.NotImplementedError{Message: "VNet Windows service is not yet implemented"} + +type vnetInstallServiceCommand struct { *kingpin.CmdClause } -func newVnetCommand(app *kingpin.Application) *vnetCommand { - cmd := &vnetCommand{ - CmdClause: app.Command("vnet", "Start Teleport VNet, a virtual network for TCP application access.").Hidden(), +func newPlatformVnetInstallServiceCommand(app *kingpin.Application) *vnetInstallServiceCommand { + cmd := &vnetInstallServiceCommand{ + CmdClause: app.Command("vnet-install-service", "Install the VNet Windows service.").Hidden(), } return cmd } -func (c *vnetCommand) run(cf *CLIConf) error { - appProvider, err := newVnetAppProvider(cf) - if err != nil { - return trace.Wrap(err) - } - - processManager, err := vnet.Run(cf.Context, &vnet.RunConfig{AppProvider: appProvider}) - if err != nil { - return trace.Wrap(err) - } +func (c *vnetInstallServiceCommand) run(cf *CLIConf) error { + // TODO(nklaassen): implement VNet Windows service installation. 
+ return trace.Wrap(windowsServiceNotImplemented) +} - go func() { - <-cf.Context.Done() - processManager.Close() - }() +type vnetUninstallServiceCommand struct { + *kingpin.CmdClause +} - fmt.Println("VNet is ready.") +func newPlatformVnetUninstallServiceCommand(app *kingpin.Application) *vnetUninstallServiceCommand { + cmd := &vnetUninstallServiceCommand{ + CmdClause: app.Command("vnet-uninstall-service", "Uninstall (delete) the VNet Windows service.").Hidden(), + } + return cmd +} - return trace.Wrap(processManager.Wait()) +func (c *vnetUninstallServiceCommand) run(cf *CLIConf) error { + // TODO(nklaassen): implement VNet Windows service uninstallation. + return trace.Wrap(windowsServiceNotImplemented) } -// vnetAdminSetupCommand is the fallback command run as root when tsh isn't -// compiled with the vnetdaemon build tag. This is typically the case when -// running tsh in development where it's not signed and bundled in tsh.app. -// -// This command expects TELEPORT_HOME to be set to the tsh home of the user who wants to run VNet. -type vnetAdminSetupCommand struct { +// vnetServiceCommand is the command that runs the Windows service. +type vnetServiceCommand struct { *kingpin.CmdClause - // socketPath is a path to a unix socket used for passing a TUN device from the admin process to - // the unprivileged process. - socketPath string - // ipv6Prefix is the IPv6 prefix for the VNet. - ipv6Prefix string - // dnsAddr is the IP address for the VNet DNS server. - dnsAddr string } -func newVnetAdminSetupCommand(app *kingpin.Application) *vnetAdminSetupCommand { - cmd := &vnetAdminSetupCommand{ - CmdClause: app.Command(teleport.VnetAdminSetupSubCommand, "Start the VNet admin subprocess.").Hidden(), +func newPlatformVnetServiceCommand(app *kingpin.Application) *vnetServiceCommand { + cmd := &vnetServiceCommand{ + CmdClause: app.Command("vnet-service", "Start the VNet service.").Hidden(), } - cmd.Flag("socket", "socket path").StringVar(&cmd.socketPath) - cmd.Flag("ipv6-prefix", "IPv6 prefix for the VNet").StringVar(&cmd.ipv6Prefix) - cmd.Flag("dns-addr", "VNet DNS address").StringVar(&cmd.dnsAddr) return cmd } -func (c *vnetAdminSetupCommand) run(cf *CLIConf) error { - homePath := os.Getenv(types.HomeEnvVar) - if homePath == "" { - // This runs as root so we need to be configured with the user's home path. - return trace.BadParameter("%s must be set", types.HomeEnvVar) +func (c *vnetServiceCommand) run(_ *CLIConf) error { + if !isWindowsService() { + return trace.Errorf("not running as a Windows service, cannot run vnet-service command") } + // TODO(nklaassen): implement VNet Windows service. + return trace.Wrap(windowsServiceNotImplemented) +} - config := daemon.Config{ - SocketPath: c.socketPath, - IPv6Prefix: c.ipv6Prefix, - DNSAddr: c.dnsAddr, - HomePath: homePath, - ClientCred: daemon.ClientCred{ - // TODO(nklaassen): figure out how to pass some form of user - // identifier. For now Valid: true is a hack to make - // CheckAndSetDefaults pass. - Valid: true, - }, - } +func isWindowsService() bool { + isSvc, err := svc.IsWindowsService() + return err == nil && isSvc +} - return trace.Wrap(vnet.RunAdminProcess(cf.Context, config)) +// the admin-setup command is only supported on darwin. 
+func newPlatformVnetAdminSetupCommand(*kingpin.Application) vnetCommandNotSupported { + return vnetCommandNotSupported{} } From 3c95e89a2fa306f24c30992a0f381d703d611cf9 Mon Sep 17 00:00:00 2001 From: Bernard Kim Date: Mon, 13 Jan 2025 17:15:32 -0800 Subject: [PATCH 14/15] Operator managed trusted_cluster guide (#50847) * Operator managed trusted_cluster guide * Fix lint * Fix lint * Fix lint - Add How it works section - Capitalize Teleport Agent * Fix list indentation * Link Linux demo guide * Replace invitation token -> join token * Consolidate tctl, operator, and terraform guide * Use double quotes in tf file The latest version of terraform does not support single quotes * version field is required * Fix links --- docs/cspell.json | 1 + .../managing-resources/trusted-cluster.mdx | 570 ++++++++++++++++++ .../terraform-provider/local.mdx | 2 +- .../resources/trusted_cluster.mdx | 1 + .../teleport_trusted_cluster/resource.tf | 1 + 5 files changed, 574 insertions(+), 1 deletion(-) create mode 100644 docs/pages/admin-guides/infrastructure-as-code/managing-resources/trusted-cluster.mdx diff --git a/docs/cspell.json b/docs/cspell.json index c369da521ab43..8c2ce815a4ec8 100644 --- a/docs/cspell.json +++ b/docs/cspell.json @@ -949,6 +949,7 @@ "topk", "tpmrm", "trustedclusters", + "trustedclustersv2", "trustpolicy", "truststore", "tshd", diff --git a/docs/pages/admin-guides/infrastructure-as-code/managing-resources/trusted-cluster.mdx b/docs/pages/admin-guides/infrastructure-as-code/managing-resources/trusted-cluster.mdx new file mode 100644 index 0000000000000..4be7a7c7f24f8 --- /dev/null +++ b/docs/pages/admin-guides/infrastructure-as-code/managing-resources/trusted-cluster.mdx @@ -0,0 +1,570 @@ +--- +title: Managing Trusted Clusters With IaC +description: Use infrastructure-as-code tooling to create Teleport trusted clusters. +--- + + +Trusted clusters are only available for self-hosted Teleport clusters. + + +This guide will explain how to deploy trusted clusters through infrastructure as +code. + +## How it works + +Teleport supports three ways to dynamically create resources from code: + +- The Teleport Kubernetes Operator, which allows you to manage Teleport resources + from Kubernetes +- The Teleport Terraform Provider, which allows you to manage Teleport resources + via Terraform +- The `tctl` CLI, which allows you to manage Teleport resources from your local + computer or your CI environment + +## Prerequisites + +- Access to **two** Teleport cluster instances. Follow the [Run a Self-Hosted Demo Cluster](../../deploy-a-cluster/linux-demo.mdx) + guide to learn how to deploy a self-hosted Teleport cluster on a Linux server. + + The two clusters should be at the same version or, at most, the leaf cluster can be one major version + behind the root cluster version. + +- A Teleport SSH server that is joined to the cluster you plan to use as the **leaf cluster**. + For information about how to enroll a resource in your cluster, see + [Join Services to your Cluster](../../../enroll-resources/agents/join-services-to-your-cluster/join-services-to-your-cluster.mdx). + +- Read through the [Configure Trusted Clusters](../../management/admin/trustedclusters.mdx) + guide to understand how trusted clusters works. + +- The `tctl` admin tool and `tsh` client tool. + + + + +- Read through the [Looking up values from secrets](../teleport-operator/secret-lookup.mdx) guide + to understand how to store sensitive custom resource secrets in Kubernetes + Secrets. 
+ +- [Helm](https://helm.sh/docs/intro/quickstart/) + +- [kubectl](https://kubernetes.io/docs/tasks/tools/) + +- Validate Kubernetes connectivity by running the following command: + + ```code + $ kubectl cluster-info + # Kubernetes control plane is running at https://127.0.0.1:6443 + # CoreDNS is running at https://127.0.0.1:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy + ``` + + + Users wanting to experiment locally with the Operator can use [minikube](https://minikube.sigs.k8s.io/docs/start/) + to start a local Kubernetes cluster: + + ```code + $ minikube start + ``` + + + +- Follow the [Teleport operator guides](../teleport-operator/teleport-operator.mdx) + to install the Teleport Kubernetes Operator in your Kubernetes cluster. + + Confirm that the CRD (Custom Resource Definition) for trusted clusters has + been installed with the following command: + + ```code + $ kubectl explain TeleportTrustedClusterV2.spec + GROUP: resources.teleport.dev + KIND: TeleportTrustedClusterV2 + VERSION: v1 + + FIELD: spec + + + DESCRIPTION: + TrustedCluster resource definition v2 from Teleport + + FIELDS: + enabled + Enabled is a bool that indicates if the TrustedCluster is enabled or + disabled. Setting Enabled to false has a side effect of deleting the user + and host certificate authority (CA). + + role_map <[]Object> + RoleMap specifies role mappings to remote roles. + + token + Token is the authorization token provided by another cluster needed by this + cluster to join. + + tunnel_addr + ReverseTunnelAddress is the address of the SSH proxy server of the cluster + to join. If not set, it is derived from `:`. + + web_proxy_addr + ProxyAddress is the address of the web proxy server of the cluster to join. + If not set, it is derived from `:`. + ``` + + + + +A functional Teleport Terraform provider by following [the Terraform provider guide](../terraform-provider/terraform-provider.mdx). + + + + +## Step 1/5. Prepare the leaf cluster environment + +This guide demonstrates how to enable users of your root cluster to access +a server in your leaf cluster with a specific user identity and role. +For this example, the user identity you can use to access the server in the leaf +cluster is `visitor`. Therefore, to prepare your environment, you first need to +create the `visitor` user and a Teleport role that can assume this username when +logging in to the server in the leaf cluster. + +To add a user and role for accessing the trusted cluster: + +1. Open a terminal shell on the server running the Teleport Agent in the leaf cluster. + +1. Add the local `visitor` user and create a home directory for the user by running the +following command: + + ```code + $ sudo useradd --create-home visitor + ``` + + The home directory is required for the `visitor` user to access a shell on the server. + +1. Sign out of all user logins and clusters by running the following command: + + ```code + $ tsh logout + ``` + +1. Sign in to your **leaf cluster** from your administrative workstation using +your Teleport username: + + ```code + $ tsh login --proxy= --user= + ``` + + Replace `leafcluster.example.com` with the Teleport leaf cluster domain and + `myuser` with your Teleport username. + +1. 
Create a role definition file called `visitor.yaml` with the following content: + + ```yaml + kind: role + version: v7 + metadata: + name: visitor + spec: + allow: + logins: + - visitor + node_labels: + '*': '*' + ``` + + You must explicitly allow access to nodes with labels to SSH into the server running + the Teleport Agent. In this example, the `visitor` login is allowed access to any server. + +1. Create the `visitor` role by running the following command: + + ```code + $ tctl create visitor.yaml + ``` + + You now have a `visitor` role on your leaf cluster. The `visitor` role allows + users with the `visitor` login to access nodes in the leaf cluster. In the next step, + you must add the `visitor` login to your user so you can satisfy the conditions of + the role and access the server in the leaf cluster. + + +## Step 2/5. Prepare the root cluster environment + +Before you can test access to the server in the leaf cluster, you must have a +Teleport user that can assume the `visitor` login. Because authentication is +handled by the root cluster, you need to add the `visitor` login to a user in the +root cluster. + +To add the login to your Teleport user: + +1. Sign out of all user logins and clusters by running the following command: + + ```code + $ tsh logout + ``` + +1. Sign in to your **root cluster** from your administrative workstation using +your Teleport username: + + ```code + $ tsh login --proxy= --user= + ``` + + Replace `rootcluster.example.com` with the Teleport root cluster domain and + `myuser` with your Teleport username. + +1. Open your user resource in your editor by running a command similar to the +following: + + ```code + $ tctl edit user/ + ``` + + Replace `myuser` with your Teleport username. + +1. Add the `visitor` login: + + ```diff + traits: + logins: + + - visitor + - ubuntu + - root + ``` + +1. Apply your changes by saving and closing the file in your editor. + +## Step 3/5. Generate a trusted cluster join token + +Before users from the root cluster can access the server in the +leaf cluster using the `visitor` role, you must define a trust relationship +between the clusters. Teleport establishes trust between the root cluster and a +leaf cluster using a **join token**. + +To set up trust between clusters, you must first create the join token using the +Teleport Auth Service in the root cluster. You can then use the Teleport Auth Service +on the leaf cluster to create a `trusted_cluster` resource that includes the join token, +proving to the root cluster that the leaf cluster is the one you expect to register. + +To establish the trust relationship: + +1. Sign out of all user logins and clusters by running the following command: + + ```code + $ tsh logout + ``` + +1. Sign in to your **root cluster** from your administrative workstation using +your Teleport username: + + ```code + $ tsh login --proxy= --user= + ``` + + Replace `rootcluster.example.com` with the Teleport root cluster domain and + `myuser` with your Teleport username. + +1. Generate the join token by running the following command: + + ```code + $ tctl tokens add --type=trusted_cluster --ttl=5m + The cluster join token: (=presets.tokens.first=) + ``` + + This command generates a trusted cluster join token to allow an inbound + connection from a leaf cluster. The token can be used multiple times. In this + command example, the token has an expiration time of five minutes. + + Note that the join token is only used to establish a + connection for the first time. 
Clusters exchange certificates and
+   don't use tokens to re-establish their connection afterward.
+
+   You can copy the token for later use. If you need to display the token again,
+   run the following command against your root cluster:
+
+   ```code
+   $ tctl tokens ls
+   Token                                                    Type            Labels   Expiry Time (UTC)
+   -------------------------------------------------------- --------------- -------- ---------------------------
+   (=presets.tokens.first=)                                 trusted_cluster          28 Apr 22 19:19 UTC (4m48s)
+   ```
+
+
+The trusted cluster join token is sensitive information and should not be stored
+directly in the trusted cluster custom resource. Instead, store the token in a
+Kubernetes secret. The trusted cluster resource can then be configured to
+perform a secret lookup in the next step.
+
+  ```yaml
+  # secret.yaml
+  apiVersion: v1
+  kind: Secret
+  metadata:
+    name: teleport-trusted-cluster
+    annotations:
+      # This annotation allows any CR to look up this secret.
+      # You may want to restrict which CRs are allowed to look up this secret.
+      resources.teleport.dev/allow-lookup-from-cr: "*"
+  # We use stringData instead of data for the sake of simplicity, both are OK.
+  stringData:
+    token: (=presets.tokens.first=)
+  ```
+
+  ```code
+  $ kubectl apply -f secret.yaml
+  ```
+
+
+## Step 4/5. Create a trusted cluster resource
+
+You're now ready to configure and create the trusted cluster resource.
+
+
+
+
+1. Configure your Teleport trusted cluster resource in a file called
+`trusted-cluster.yaml`.
+
+   ```yaml
+   # trusted-cluster.yaml
+   kind: trusted_cluster
+   version: v2
+   metadata:
+     # The resource name must match the name of the trusted cluster.
+     name: rootcluster.example.com
+   spec:
+     # enabled enables the trusted cluster relationship.
+     enabled: true
+
+     # token specifies the join token.
+     token: (=presets.tokens.first=)
+
+     # role_map maps Teleport roles from the root cluster to roles in the leaf cluster.
+     # In this case, users with the `access` role in the root cluster are granted
+     # the `visitor` role in the leaf cluster.
+     role_map:
+     - remote: "access"
+       local: ["visitor"]
+
+     # tunnel_addr specifies the reverse tunnel address of the root cluster proxy.
+     tunnel_addr: rootcluster.example.com:443
+
+     # web_proxy_addr specifies the address of the root cluster proxy.
+     web_proxy_addr: rootcluster.example.com:443
+   ```
+
+1. Sign in to your **leaf cluster** from your administrative workstation using
+your Teleport username:
+
+   ```code
+   $ tsh login --proxy= --user=
+   ```
+
+1. Create the trusted cluster resource from the resource configuration file by running
+the following command:
+
+   ```code
+   $ tctl create trusted-cluster.yaml
+   ```
+
+   You can also configure leaf clusters directly in the Teleport Web UI.
+   For example, you can select **Management**, then click **Trusted Clusters** to create a
+   new `trusted_cluster` resource or manage an existing trusted cluster.
+
+1. List the created `trusted_cluster` resource:
+
+   ```code
+   $ tctl get tc
+   kind: trusted_cluster
+   version: v2
+   metadata:
+     name: rootcluster.example.com
+     revision: ba8205a9-c82c-458b-a0f6-76f7c4145672
+   spec:
+     enabled: true
+     role_map:
+     - local:
+       - visitor
+       remote: access
+     token: (=presets.tokens.first=)
+     tunnel_addr: rootcluster.example.com:443
+     web_proxy_addr: rootcluster.example.com:443
+   ```
+
+
+
+
+1. Configure your Kubernetes trusted cluster resource in a file called
+`trusted-cluster.yaml`.
+
+   ```yaml
+   # trusted-cluster.yaml
+   apiVersion: resources.teleport.dev/v1
+   kind: TeleportTrustedClusterV2
+   metadata:
+     # The resource name must match the name of the trusted cluster.
+     name: rootcluster.example.com
+   spec:
+     # enabled enables the trusted cluster relationship.
+     enabled: true
+
+     # token specifies the join token.
+     # This value will be resolved from the previously stored secret.
+     # `teleport-trusted-cluster` is the secret name and `token` is the secret key.
+     token: "secret://teleport-trusted-cluster/token"
+
+     # role_map maps Teleport roles from the root cluster to roles in the leaf cluster.
+     # In this case, users with the `access` role in the root cluster are granted
+     # the `visitor` role in the leaf cluster.
+     role_map:
+     - remote: access
+       local:
+       - visitor
+
+     # tunnel_addr specifies the reverse tunnel address of the root cluster proxy.
+     tunnel_addr: rootcluster.example.com:443
+
+     # web_proxy_addr specifies the address of the root cluster proxy.
+     web_proxy_addr: rootcluster.example.com:443
+   ```
+
+1. Create the Kubernetes resource:
+
+   ```code
+   $ kubectl apply -f trusted-cluster.yaml
+   ```
+
+1. List the created Kubernetes resource:
+
+   ```code
+   $ kubectl get trustedclustersv2
+   NAMESPACE   NAME                      AGE
+   default     rootcluster.example.com   60s
+   ```
+
+
+
+
+1. Configure your Terraform trusted cluster resource in a file called
+`trusted-cluster.tf`.
+
+   ```hcl
+   # trusted-cluster.tf
+   resource "teleport_trusted_cluster" "cluster" {
+     version = "v2"
+     metadata = {
+       # The resource name must match the name of the trusted cluster.
+       name = "rootcluster.example.com"
+     }
+
+     spec = {
+       # enabled enables the trusted cluster relationship.
+       enabled = true
+
+       # token specifies the join token.
+       token = "(=presets.tokens.first=)"
+
+       # role_map maps Teleport roles from the root cluster to roles in the leaf cluster.
+       # In this case, users with the `access` role in the root cluster are granted
+       # the `visitor` role in the leaf cluster.
+       role_map = [{
+         remote = "access"
+         local  = ["visitor"]
+       }]
+
+       # tunnel_addr specifies the reverse tunnel address of the root cluster proxy.
+       tunnel_addr = "rootcluster.example.com:443"
+
+       # web_proxy_addr specifies the address of the root cluster proxy.
+       web_proxy_addr = "rootcluster.example.com:443"
+     }
+   }
+   ```
+
+1. Plan and apply the Terraform resources:
+
+   ```code
+   $ terraform plan
+   [...]
+   Plan: 1 to add, 0 to change, 0 to destroy.
+
+   $ terraform apply
+   [...]
+   teleport_trusted_cluster.cluster: Creating...
+   teleport_trusted_cluster.cluster: Creation complete after 0s [id=rootcluster.example.com]
+   Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
+   ```
+
+
+
+
+1. Sign out of the leaf cluster and sign back in to the root cluster.
+
+1. Verify the trusted cluster configuration by running the following command in
+the root cluster:
+
+   ```code
+   $ tsh clusters
+   Cluster Name                Status Cluster Type Labels Selected
+   --------------------------- ------ ------------ ------ --------
+   rootcluster.example.com     online root                *
+   leafcluster.example.com     online leaf
+   ```
+
+## Step 5/5. Access a server in the leaf cluster
+
+With the `trusted_cluster` resource you created earlier, you can log in to the
+server in your leaf cluster as a user of your root cluster.
+
+To test access to the server:
+
+1. Verify that you are signed in as a Teleport user on the root cluster by
+running the following command:
+
+   ```code
+   $ tsh status
+   ```
+
+1. Confirm that the server running the Teleport Agent is joined to the leaf cluster by
+running a command similar to the following:
+
+   ```code
+   $ tsh ls --cluster=
+   Node Name       Address        Labels
+   --------------- -------------- ------------------------------------
+   ip-172-3-1-242  127.0.0.1:3022 hostname=ip-172-3-1-242
+   ip-172-3-2-205  ⟵ Tunnel       hostname=ip-172-3-2-205
+   ```
+
+1. Open a secure shell connection using the `visitor` login:
+
+   ```code
+   $ tsh ssh --cluster= visitor@ip-172-3-2-205
+   ```
+
+1. Confirm that you are logged in as the user `visitor` on the server
+in the leaf cluster by running the following commands:
+
+   ```code
+   $ pwd
+   /home/visitor
+   $ uname -a
+   Linux ip-172-3-2-205 5.15.0-1041-aws #46~20.04.1-Ubuntu SMP Wed Jul 19 15:39:29 UTC 2023 aarch64 aarch64 aarch64 GNU/Linux
+   ```
+
+
+**Manage an existing trusted cluster with the Teleport Kubernetes Operator**
+
+If you have an existing trusted cluster that you would like to manage with the
+Teleport Kubernetes Operator, you can do this by first setting the trusted
+cluster label `teleport.dev/origin: kubernetes`. The Teleport Kubernetes
+Operator will then be able to adopt the `trusted_cluster` as a managed resource.
+
+```yaml
+kind: trusted_cluster
+metadata:
+  name: rootcluster.example.com
+  labels:
+    teleport.dev/origin: kubernetes
+...
+```
+
\ No newline at end of file
diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx
index 2dce9185ce9fb..4d0ec62dca5ce 100644
--- a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx
+++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx
@@ -96,7 +96,7 @@ You can run the Teleport Terraform provider from this shell.
 }
 
 provider "teleport" {
-  addr               = ''
+  addr               = ""
 }
 
 # We must create a test role, if we don't declare resources, Terraform won't try to
diff --git a/docs/pages/reference/terraform-provider/resources/trusted_cluster.mdx b/docs/pages/reference/terraform-provider/resources/trusted_cluster.mdx
index 4db28033c5628..99b2ec2f7a052 100644
--- a/docs/pages/reference/terraform-provider/resources/trusted_cluster.mdx
+++ b/docs/pages/reference/terraform-provider/resources/trusted_cluster.mdx
@@ -15,6 +15,7 @@ description: This page describes the supported values of the teleport_trusted_cl
 # Teleport trusted cluster
 
 resource "teleport_trusted_cluster" "cluster" {
+  version = "v2"
   metadata = {
     name = "primary"
     labels = {
diff --git a/integrations/terraform/examples/resources/teleport_trusted_cluster/resource.tf b/integrations/terraform/examples/resources/teleport_trusted_cluster/resource.tf
index 9c2f253909bda..89a7070a6d50f 100644
--- a/integrations/terraform/examples/resources/teleport_trusted_cluster/resource.tf
+++ b/integrations/terraform/examples/resources/teleport_trusted_cluster/resource.tf
@@ -1,6 +1,7 @@
 # Teleport trusted cluster
 
 resource "teleport_trusted_cluster" "cluster" {
+  version = "v2"
   metadata = {
     name = "primary"
     labels = {

From 7d38f13b9d0475f6d4f7c3255255d164e36cde7c Mon Sep 17 00:00:00 2001
From: Gavin Frazar
Date: Mon, 13 Jan 2025 18:49:57 -0800
Subject: [PATCH 15/15] Only apply dynamic AWS settings to dynamic AWS dbs (#50970)

* Only apply dynamic AWS settings to dynamic AWS dbs

Dynamic database resource matchers can include AWS settings to assume an
AWS IAM role when they match a database. The settings should only be
applied to dynamic AWS databases.
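
For illustration, such a matcher in a db_service configuration looks roughly
like the following sketch; the labels, role ARN, and external ID here are
hypothetical placeholders:

```yaml
db_service:
  enabled: true
  # Dynamic registration: match database objects by label.
  resources:
    - labels:
        "env": "prod"
      aws:
        # Role assumed when this matcher selects an AWS database.
        assume_role_arn: "arn:aws:iam::123456789012:role/example-db-access"
        external_id: "example-external-id"
```
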
The db service will no longer apply these settings to non-AWS databases. It will also no longer apply these settings to databases discovered by the legacy cloud watchers in db_service.aws - the cloud watchers have an assume_role_arn setting that should not be overridden by dynamic database matcher settings. * fix reconcilitation race --- lib/srv/db/server.go | 27 +++++++++------- lib/srv/db/watcher.go | 33 +++++++++++++++----- lib/srv/db/watcher_test.go | 63 ++++++++++++++++++++++++++------------ 3 files changed, 84 insertions(+), 39 deletions(-) diff --git a/lib/srv/db/server.go b/lib/srv/db/server.go index dfb1a4b164192..6c8da739fc5db 100644 --- a/lib/srv/db/server.go +++ b/lib/srv/db/server.go @@ -391,9 +391,10 @@ func (m *monitoredDatabases) setCloud(databases types.Databases) { m.cloud = databases } -func (m *monitoredDatabases) isCloud(database types.Database) bool { - m.mu.RLock() - defer m.mu.RUnlock() +// isCloud_Locked returns whether a database was discovered by the cloud +// watchers, aka legacy database discovery done by the db service. +// The lock must be held when calling this function. +func (m *monitoredDatabases) isCloud_Locked(database types.Database) bool { for i := range m.cloud { if m.cloud[i] == database { return true @@ -402,13 +403,17 @@ func (m *monitoredDatabases) isCloud(database types.Database) bool { return false } -func (m *monitoredDatabases) isDiscoveryResource(database types.Database) bool { - return database.Origin() == types.OriginCloud && m.isResource(database) +// isDiscoveryResource_Locked returns whether a database was discovered by the +// discovery service. +// The lock must be held when calling this function. +func (m *monitoredDatabases) isDiscoveryResource_Locked(database types.Database) bool { + return database.Origin() == types.OriginCloud && m.isResource_Locked(database) } -func (m *monitoredDatabases) isResource(database types.Database) bool { - m.mu.RLock() - defer m.mu.RUnlock() +// isResource_Locked returns whether a database is a dynamic database, aka a db +// object. +// The lock must be held when calling this function. +func (m *monitoredDatabases) isResource_Locked(database types.Database) bool { for i := range m.resources { if m.resources[i] == database { return true @@ -417,9 +422,9 @@ func (m *monitoredDatabases) isResource(database types.Database) bool { return false } -func (m *monitoredDatabases) get() map[string]types.Database { - m.mu.RLock() - defer m.mu.RUnlock() +// getLocked returns a slice containing all of the monitored databases. +// The lock must be held when calling this function. 
+func (m *monitoredDatabases) getLocked() map[string]types.Database { return utils.FromSlice(append(append(m.static, m.resources...), m.cloud...), types.Database.GetName) } diff --git a/lib/srv/db/watcher.go b/lib/srv/db/watcher.go index 2dc1dcb11d35c..036aee8952922 100644 --- a/lib/srv/db/watcher.go +++ b/lib/srv/db/watcher.go @@ -40,7 +40,7 @@ func (s *Server) startReconciler(ctx context.Context) error { reconciler, err := services.NewReconciler(services.ReconcilerConfig[types.Database]{ Matcher: s.matcher, GetCurrentResources: s.getResources, - GetNewResources: s.monitoredDatabases.get, + GetNewResources: s.monitoredDatabases.getLocked, OnCreate: s.onCreate, OnUpdate: s.onUpdate, OnDelete: s.onDelete, @@ -53,12 +53,15 @@ func (s *Server) startReconciler(ctx context.Context) error { for { select { case <-s.reconcileCh: + // don't let monitored dbs change during reconciliation + s.monitoredDatabases.mu.RLock() if err := reconciler.Reconcile(ctx); err != nil { s.log.ErrorContext(ctx, "Failed to reconcile.", "error", err) } if s.cfg.OnReconcile != nil { s.cfg.OnReconcile(s.getProxiedDatabases()) } + s.monitoredDatabases.mu.RUnlock() case <-ctx.Done(): s.log.DebugContext(ctx, "Reconciler done.") return @@ -169,11 +172,15 @@ func (s *Server) onCreate(ctx context.Context, database types.Database) error { // copy here so that any attribute changes to the proxied database will not // affect database objects tracked in s.monitoredDatabases. databaseCopy := database.Copy() - applyResourceMatchersToDatabase(databaseCopy, s.cfg.ResourceMatchers) + + // only apply resource matcher settings to dynamic resources. + if s.monitoredDatabases.isResource_Locked(database) { + s.applyAWSResourceMatcherSettings(databaseCopy) + } // Run DiscoveryResourceChecker after resource matchers are applied to make // sure the correct AssumeRoleARN is used. - if s.monitoredDatabases.isDiscoveryResource(database) { + if s.monitoredDatabases.isDiscoveryResource_Locked(database) { if err := s.cfg.discoveryResourceChecker.Check(ctx, databaseCopy); err != nil { return trace.Wrap(err) } @@ -187,7 +194,11 @@ func (s *Server) onUpdate(ctx context.Context, database, _ types.Database) error // copy here so that any attribute changes to the proxied database will not // affect database objects tracked in s.monitoredDatabases. databaseCopy := database.Copy() - applyResourceMatchersToDatabase(databaseCopy, s.cfg.ResourceMatchers) + + // only apply resource matcher settings to dynamic resources. + if s.monitoredDatabases.isResource_Locked(database) { + s.applyAWSResourceMatcherSettings(databaseCopy) + } return s.updateDatabase(ctx, databaseCopy) } @@ -200,7 +211,7 @@ func (s *Server) onDelete(ctx context.Context, database types.Database) error { func (s *Server) matcher(database types.Database) bool { // In the case of databases discovered by this database server, matchers // should be skipped. - if s.monitoredDatabases.isCloud(database) { + if s.monitoredDatabases.isCloud_Locked(database) { return true // Cloud fetchers return only matching databases. 
} @@ -209,12 +220,18 @@ func (s *Server) matcher(database types.Database) bool { return services.MatchResourceLabels(s.cfg.ResourceMatchers, database.GetAllLabels()) } -func applyResourceMatchersToDatabase(database types.Database, resourceMatchers []services.ResourceMatcher) { - for _, matcher := range resourceMatchers { +func (s *Server) applyAWSResourceMatcherSettings(database types.Database) { + if !database.IsAWSHosted() { + // dynamic matchers only apply AWS settings (for now), so skip non-AWS + // databases. + return + } + dbLabels := database.GetAllLabels() + for _, matcher := range s.cfg.ResourceMatchers { if len(matcher.Labels) == 0 || matcher.AWS.AssumeRoleARN == "" { continue } - if match, _, _ := services.MatchLabels(matcher.Labels, database.GetAllLabels()); !match { + if match, _, _ := services.MatchLabels(matcher.Labels, dbLabels); !match { continue } diff --git a/lib/srv/db/watcher_test.go b/lib/srv/db/watcher_test.go index 6020547ea9590..6c94e201de177 100644 --- a/lib/srv/db/watcher_test.go +++ b/lib/srv/db/watcher_test.go @@ -21,6 +21,7 @@ package db import ( "context" "fmt" + "maps" "sort" "testing" "time" @@ -60,11 +61,13 @@ func TestWatcher(t *testing.T) { // watches for databases with label group=a. testCtx.setupDatabaseServer(ctx, t, agentParams{ Databases: []types.Database{db0}, - ResourceMatchers: []services.ResourceMatcher{ - {Labels: types.Labels{ + ResourceMatchers: []services.ResourceMatcher{{ + Labels: types.Labels{ "group": []string{"a"}, - }}, - }, + }, + // these should not be applied to non-AWS databases. + AWS: services.ResourceMatcherAWS{AssumeRoleARN: "some-role", ExternalID: "some-externalid"}, + }}, OnReconcile: func(d types.Databases) { reconcileCh <- d }, @@ -137,7 +140,7 @@ func TestWatcher(t *testing.T) { // ResourceMatchers should be always evaluated for the dynamic registered // resources. func TestWatcherDynamicResource(t *testing.T) { - var db1, db2, db3, db4, db5 *types.DatabaseV3 + var db1, db2, db3, db4, db5, db6 *types.DatabaseV3 ctx := context.Background() testCtx := setupTestContext(ctx, t) @@ -247,6 +250,7 @@ func TestWatcherDynamicResource(t *testing.T) { // ResourceMatchers and has AssumeRoleARN set by the discovery service. 
discoveredDB5, err := makeDiscoveryDatabase("db5", map[string]string{"group": "b"}, withRDSURL, withDiscoveryAssumeRoleARN) require.NoError(t, err) + require.True(t, discoveredDB5.IsAWSHosted()) require.True(t, discoveredDB5.IsRDS()) err = testCtx.authServer.CreateDatabase(ctx, discoveredDB5) @@ -260,6 +264,23 @@ func TestWatcherDynamicResource(t *testing.T) { assertReconciledResource(t, reconcileCh, types.Databases{db0, db2, db4, db5}) }) + t.Run("non-AWS discovery resource - AssumeRoleARN not applied", func(t *testing.T) { + // Created a discovery service created database resource that matches + // ResourceMatchers but is not an AWS database + _, azureDB := makeAzureSQLServer(t, "discovery-azure", "group") + setDiscoveryTypeLabel(azureDB, types.AzureMatcherSQLServer) + setLabels(azureDB, map[string]string{"group": "b"}) + azureDB.SetOrigin(types.OriginCloud) + require.False(t, azureDB.IsAWSHosted()) + require.True(t, azureDB.GetAWS().IsEmpty()) + require.True(t, azureDB.IsAzure()) + err = testCtx.authServer.CreateDatabase(ctx, azureDB) + require.NoError(t, err) + + db6 = azureDB.Copy() + assertReconciledResource(t, reconcileCh, types.Databases{db0, db2, db4, db5, db6}) + }) + t.Run("discovery resource - fail check", func(t *testing.T) { // Created a discovery service created database resource that fails the // fakeDiscoveryResourceChecker. @@ -268,27 +289,20 @@ func TestWatcherDynamicResource(t *testing.T) { require.NoError(t, testCtx.authServer.CreateDatabase(ctx, dbFailCheck)) // dbFailCheck should not be proxied. - assertReconciledResource(t, reconcileCh, types.Databases{db0, db2, db4, db5}) + assertReconciledResource(t, reconcileCh, types.Databases{db0, db2, db4, db5, db6}) }) } -func setDiscoveryGroupLabel(r types.ResourceWithLabels, discoveryGroup string) { - staticLabels := r.GetStaticLabels() - if staticLabels == nil { - staticLabels = make(map[string]string) - } - if discoveryGroup != "" { - staticLabels[types.TeleportInternalDiscoveryGroupName] = discoveryGroup - } - r.SetStaticLabels(staticLabels) +func setDiscoveryTypeLabel(r types.ResourceWithLabels, matcherType string) { + setLabels(r, map[string]string{types.DiscoveryTypeLabel: matcherType}) } -func setDiscoveryTypeLabel(r types.ResourceWithLabels, matcherType string) { +func setLabels(r types.ResourceWithLabels, newLabels map[string]string) { staticLabels := r.GetStaticLabels() if staticLabels == nil { staticLabels = make(map[string]string) } - staticLabels[types.DiscoveryTypeLabel] = matcherType + maps.Copy(staticLabels, newLabels) r.SetStaticLabels(staticLabels) } @@ -301,15 +315,16 @@ func TestWatcherCloudFetchers(t *testing.T) { redshiftServerlessDatabase, err := discovery.NewDatabaseFromRedshiftServerlessWorkgroup(redshiftServerlessWorkgroup, nil) require.NoError(t, err) redshiftServerlessDatabase.SetStatusAWS(redshiftServerlessDatabase.GetAWS()) - setDiscoveryGroupLabel(redshiftServerlessDatabase, "") setDiscoveryTypeLabel(redshiftServerlessDatabase, types.AWSMatcherRedshiftServerless) redshiftServerlessDatabase.SetOrigin(types.OriginCloud) discovery.ApplyAWSDatabaseNameSuffix(redshiftServerlessDatabase, types.AWSMatcherRedshiftServerless) + require.Empty(t, redshiftServerlessDatabase.GetAWS().AssumeRoleARN) + require.Empty(t, redshiftServerlessDatabase.GetAWS().ExternalID) // Test an Azure fetcher. 
azSQLServer, azSQLServerDatabase := makeAzureSQLServer(t, "discovery-azure", "group") - setDiscoveryGroupLabel(azSQLServerDatabase, "") setDiscoveryTypeLabel(azSQLServerDatabase, types.AzureMatcherSQLServer) azSQLServerDatabase.SetOrigin(types.OriginCloud) + require.False(t, azSQLServerDatabase.IsAWSHosted()) ctx := context.Background() testCtx := setupTestContext(ctx, t) @@ -319,7 +334,15 @@ func TestWatcherCloudFetchers(t *testing.T) { OnReconcile: func(d types.Databases) { reconcileCh <- d }, + ResourceMatchers: []services.ResourceMatcher{{ + Labels: types.Labels{types.Wildcard: []string{types.Wildcard}}, + AWS: services.ResourceMatcherAWS{ + AssumeRoleARN: "role-arn", + ExternalID: "external-id", + }, + }}, CloudClients: &clients.TestCloudClients{ + STS: &mocks.STSClientV1{}, RedshiftServerless: &mocks.RedshiftServerlessMock{ Workgroups: []*redshiftserverless.Workgroup{redshiftServerlessWorkgroup}, }, @@ -351,7 +374,7 @@ func assertReconciledResource(t *testing.T, ch chan types.Databases, databases t select { case d := <-ch: sort.Sort(d) - require.Equal(t, len(d), len(databases)) + require.Equal(t, len(databases), len(d)) require.Empty(t, cmp.Diff(databases, d, cmpopts.IgnoreFields(types.Metadata{}, "Revision"), cmpopts.IgnoreFields(types.DatabaseStatusV3{}, "CACert"),