Skip to content
This repository has been archived by the owner on Apr 2, 2022. It is now read-only.

Support for multiple IAM groups #3

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions Readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,13 @@ The general overview for what this tool does can be found here: https://ygrene.t
$ echo -n "secretkey" | base64
```
5) Update the `AWS_REGION` environment variable in `kubernetes/deployment.yaml` if you aren't running in `us-west-2` with your EKS cluster
6) Edit the `kubernetes/deployment.yaml` `command:` with both the IAM group name you want to provide access to, and the Kubernetes group each user in the group should be mapped to.
(there is an example in the manifest already)
6) Edit the `kubernetes/deployment.yaml` `command:` with both the IAM group name(s) you want to provide access to, and the Kubernetes group(s) each user in those IAM groups should be mapped to.
If you want to use more than one IAM group, separate them with `;`. Each IAM group requires its own set of Kubernetes groups: separate the sets with `;`, and within a set separate individual groups with `,` (there is an example in the manifest already)
7) Finally:
```bash
$ kubectl apply -f kubernetes/
```
8) Rejoice, now user management will be a bit easier.

## Have suggestions or want to contribute?
Raise a PR or file an issue, I'd love to help!
Raise a PR or file an issue, I'd love to help!
4 changes: 2 additions & 2 deletions kubernetes/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,9 @@ spec:
command:
- ./app
- --aws-iam-group
- devs
- devs;jenkins
- --k8s-cap
- system:masters
- system:masters,cluster-admin;system:basic-user
automountServiceAccountToken: true
serviceAccountName: iam-eks-user-mapper
metadata:
Expand Down
56 changes: 35 additions & 21 deletions src/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,57 +2,70 @@ package main

import (
"flag"
"strings"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/kataras/golog"
"gopkg.in/yaml.v2"
"strings"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)

func main() {
iamGroup := flag.String("aws-iam-group","", "--aws-iam-group=devs")
k8sCap := flag.String("k8s-cap","", "--k8s-cap=system:masters,aggregate-to-admin")
iamGroups := flag.String("aws-iam-group", "", "--aws-iam-group=devs;jenkins")
k8sCap := flag.String("k8s-cap", "", "--k8s-cap=system:masters,aggregate-to-admin;system:basic-user")
flag.Parse()

//enumerate the k8s roles
roleArr := strings.Split(*k8sCap, ",")
iamGroupsArr := strings.Split(*iamGroups, ";")
k8sCapArr := strings.Split(*k8sCap, ";")
if len(iamGroupsArr) != len(k8sCapArr) {
panic("Amount of IAM groups and Kubernetes roles do not match up.")
}

// creates the in-cluster config
config, err := rest.InClusterConfig()
if err != nil {
panic(err.Error())
}

// creates the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err.Error())
}

for {
users := getAwsIamGroup(*iamGroup)
cf, err := clientset.CoreV1().ConfigMaps("kube-system").Get("aws-auth", metav1.GetOptions{})
if err != nil {
panic(err.Error())
}
var newConfig []MapUserConfig

for _, user := range users.Users {
newConfig = append(newConfig, MapUserConfig{
UserArn: *user.Arn,
Username: *user.UserName,
Groups: roleArr,
})
}
roleStr, err := yaml.Marshal(newConfig)
if err != nil {
golog.Error(err)
cf.Data["mapUsers"] = ""

for index, iamGroup := range iamGroupsArr {
users := getAwsIamGroup(iamGroup)
var newConfig []MapUserConfig

for _, user := range users.Users {
newConfig = append(newConfig, MapUserConfig{
UserArn: *user.Arn,
Username: *user.UserName,
Groups: strings.Split(k8sCapArr[index],","),
})
}

roleStr, err := yaml.Marshal(newConfig)
if err != nil {
golog.Error(err)
}
cf.Data["mapUsers"] = strings.Join([]string{cf.Data["mapUsers"], string(roleStr)}, "")
}
cf.Data["mapUsers"] = string(roleStr)

newCF, err := clientset.CoreV1().ConfigMaps("kube-system").Update(cf)
if err != nil {
Expand All @@ -61,6 +74,7 @@ func main() {
golog.Info("successfully updated user roles")
golog.Info(newCF)
}

time.Sleep(10 * time.Second)
}
}
Expand All @@ -71,7 +85,7 @@ func getAwsIamGroup(groupName string) *iam.GetGroupOutput {
group, err := iamClient.GetGroup(&iam.GetGroupInput{
GroupName: aws.String(groupName),
})
if err != nil{
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case iam.ErrCodeNoSuchEntityException:
Expand All @@ -84,4 +98,4 @@ func getAwsIamGroup(groupName string) *iam.GetGroupOutput {
}
}
return group
}
}