Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feature/kubernetes #108

Closed
wants to merge 14 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
/fyne-cross
/*.dmg
/*.xip
/internal/cmd/fyne-cross-s3/fyne-cross-s3
9 changes: 9 additions & 0 deletions docker/base/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,20 @@ ARG FYNE_VERSION
# Install the fyne CLI tool
RUN go install -ldflags="-w -s" -v "fyne.io/fyne/v2/cmd/fyne@${FYNE_VERSION}"

FROM golang:${GO_VERSION}-buster AS fyne-cross-tools

WORKDIR /src

COPY / .
WORKDIR /src/internal/cmd/fyne-cross-s3
RUN go build && go install

# Build the fyne-cross base image
FROM golang:${GO_VERSION}-buster AS base
ARG FIXUID_VERSION

COPY --from=tools /go/bin/fyne /usr/local/bin
COPY --from=fyne-cross-tools /go/bin/fyne-cross-s3 /usr/local/bin

RUN apt-get update -qq \
&& apt-get install -y -q --no-install-recommends \
Expand Down
1 change: 1 addition & 0 deletions docker/windows/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ RUN apt-get update \
&& apt-get install -y -q --no-install-recommends \
gcc-mingw-w64 \
parallel \
zip \
&& apt-get -qy autoremove \
&& apt-get clean \
&& rm -r /var/lib/apt/lists/*;
7 changes: 7 additions & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,13 @@ go 1.13
require (
github.com/BurntSushi/toml v1.0.0
github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9
github.com/aws/aws-sdk-go v1.43.31
github.com/mholt/archiver/v4 v4.0.0-alpha.6
github.com/stretchr/testify v1.7.0
github.com/urfave/cli/v2 v2.4.0
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9
k8s.io/api v0.23.5
k8s.io/apimachinery v0.23.5
k8s.io/client-go v0.23.5
k8s.io/kubectl v0.23.5
)
945 changes: 942 additions & 3 deletions go.sum

Large diffs are not rendered by default.

38 changes: 38 additions & 0 deletions internal/cloud/kubernetes.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
package cloud

import (
	"os"
	"path/filepath"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

// GetKubernetesClient builds a Kubernetes REST configuration and client set.
//
// Configuration is resolved in order of precedence:
//  1. the file pointed to by the KUBECONFIG environment variable, if set,
//  2. $HOME/.kube/config, if that file exists,
//  3. the in-cluster configuration (when running inside a pod).
//
// Both the resolved *rest.Config and the *kubernetes.Clientset are returned
// so callers can reuse the raw configuration for other clients.
func GetKubernetesClient() (*rest.Config, *kubernetes.Clientset, error) {
	// Honor the standard KUBECONFIG environment variable when present;
	// otherwise fall back to the conventional location in the home directory.
	kubeconfig := os.Getenv("KUBECONFIG")
	if kubeconfig == "" {
		kubeconfig = filepath.Join(homedir.HomeDir(), ".kube", "config")
	}

	var config *rest.Config
	var err error

	if Exists(kubeconfig) {
		// Try to build cluster configuration from file.
		config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
	} else {
		// No configuration file, try probing in cluster config.
		config, err = rest.InClusterConfig()
	}
	if err != nil {
		return nil, nil, err
	}

	kubectl, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, nil, err
	}

	return config, kubectl, nil
}
263 changes: 263 additions & 0 deletions internal/cloud/s3.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,263 @@
package cloud

import (
"context"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"sync"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"

archiver "github.com/mholt/archiver/v4"
)

// AWSSession wraps an AWS SDK session scoped to a single S3 bucket and
// tracks the cancellation function of the most recent in-flight transfer.
type AWSSession struct {
	s      *session.Session // underlying AWS SDK session used for all S3 calls
	bucket string           // name of the S3 bucket every operation targets

	m      sync.Mutex         // guards cancel: transfers swap it in, Cancel invokes it
	cancel context.CancelFunc // cancels the current transfer; reset to a no-op when idle
}

func Exists(path string) bool {
_, err := os.Stat(path)
if err != nil {
return !errors.Is(err, os.ErrNotExist)
}
return true
}

// NewAWSSessionFromEnvironment creates an AWSSession whose endpoint, region
// and bucket come from the AWS_S3_ENDPOINT, AWS_S3_REGION and AWS_S3_BUCKET
// environment variables. Credentials are left to the SDK's default chain.
func NewAWSSessionFromEnvironment() (*AWSSession, error) {
	endpoint := os.Getenv("AWS_S3_ENDPOINT")
	region := os.Getenv("AWS_S3_REGION")
	bucket := os.Getenv("AWS_S3_BUCKET")

	return NewAWSSession("", "", endpoint, region, bucket)
}

// NewAWSSession creates a session bound to the given bucket. When both akid
// and secret are non-empty they are installed as static credentials;
// otherwise the SDK's default credential chain is used.
func NewAWSSession(akid string, secret string, endpoint string, region string, bucket string) (*AWSSession, error) {
	cfg := aws.Config{
		Endpoint: aws.String(endpoint),
		Region:   aws.String(region),
	}
	if akid != "" && secret != "" {
		cfg.Credentials = credentials.NewStaticCredentials(akid, secret, "")
	}

	s, err := session.NewSession(&cfg)
	if err != nil {
		return nil, err
	}

	return &AWSSession{
		s:      s,
		bucket: bucket,
		cancel: func() {},
	}, nil
}

// GetCredentials resolves the session's AWS credentials. The lookup is
// cancellable through Cancel while it is in flight.
func (a *AWSSession) GetCredentials() (credentials.Value, error) {
	ctx, cancel := context.WithCancel(context.Background())

	a.m.Lock()
	a.cancel = cancel
	a.m.Unlock()
	defer a.Cancel()

	return a.s.Config.Credentials.GetWithContext(ctx)
}

// UploadFile streams the file at localFile to s3FilePath in the session's
// bucket. The transfer can be aborted through Cancel.
func (a *AWSSession) UploadFile(localFile string, s3FilePath string) error {
	src, err := os.Open(localFile)
	if err != nil {
		return err
	}
	defer src.Close()

	ctx, cancel := context.WithCancel(context.Background())
	a.m.Lock()
	a.cancel = cancel
	a.m.Unlock()
	defer a.Cancel()

	input := &s3manager.UploadInput{
		Bucket: aws.String(a.bucket),
		Key:    aws.String(s3FilePath),
		Body:   src,
	}

	_, err = s3manager.NewUploader(a.s).UploadWithContext(ctx, input)
	return err
}

// UploadCompressedDirectory archives localDirectory as a Zstd-compressed tar
// stream and uploads it to s3FilePath in the session's bucket, streaming
// through a pipe so the archive is never fully buffered in memory. The
// transfer can be aborted through Cancel.
func (a *AWSSession) UploadCompressedDirectory(localDirectory string, s3FilePath string) error {
	files, err := archiver.FilesFromDisk(nil, map[string]string{
		localDirectory: "/",
	})
	if err != nil {
		return err
	}

	format := archiver.CompressedArchive{
		Compression: archiver.Zstd{},
		Archival:    archiver.Tar{},
	}

	in, out := io.Pipe()

	// Buffered so the archiving goroutine can always deliver its result,
	// even when the upload fails and this function returns early.
	errorChannel := make(chan error, 1)

	go func() {
		// Use a goroutine-local error: writing to the caller's err from
		// here would be a data race.
		archiveErr := format.Archive(context.Background(), out, files)
		// Closing the writer with the error propagates an archiving failure
		// to the reading side instead of presenting truncated data as EOF.
		out.CloseWithError(archiveErr)

		errorChannel <- archiveErr
	}()

	uploader := s3manager.NewUploader(a.s)

	a.m.Lock()
	ctxt, cancel := context.WithCancel(context.Background())
	a.cancel = cancel
	a.m.Unlock()
	defer a.Cancel()

	_, err = uploader.UploadWithContext(ctxt, &s3manager.UploadInput{
		Bucket: aws.String(a.bucket),
		Key:    aws.String(s3FilePath),

		Body: in,
	})
	// Close the read end unconditionally: on upload failure this unblocks
	// the archiving goroutine (its pipe writes fail) so it cannot leak, and
	// on success it is a harmless no-op for a drained pipe. Then always
	// collect the goroutine's result so its channel send never blocks.
	in.Close()
	if archiveErr := <-errorChannel; err == nil {
		err = archiveErr
	}
	return err
}

// DownloadFile fetches s3FilePath from the session's bucket into localFile,
// creating or truncating the local file. The transfer can be aborted through
// Cancel.
func (a *AWSSession) DownloadFile(s3FilePath string, localFile string) (err error) {
	f, err := os.Create(localFile)
	if err != nil {
		return err
	}
	// The handle was previously never closed (file descriptor leak). Close
	// it on every path and surface the close error: a failed close can mean
	// buffered data was not flushed to disk.
	defer func() {
		if cerr := f.Close(); err == nil {
			err = cerr
		}
	}()

	downloader := s3manager.NewDownloader(a.s)

	a.m.Lock()
	ctxt, cancel := context.WithCancel(context.Background())
	a.cancel = cancel
	a.m.Unlock()
	defer a.Cancel()

	_, err = downloader.DownloadWithContext(ctxt, f, &s3.GetObjectInput{
		Bucket: aws.String(a.bucket),
		Key:    aws.String(s3FilePath),
	})

	return err
}

// DownloadCompressedDirectory fetches the Zstd-compressed tar archive at
// s3FilePath and extracts it under localRootDirectory, streaming through a
// pipe so the archive is never fully held in memory. The top-level directory
// recorded in the archive is replaced by localRootDirectory. Links are
// skipped. The transfer can be aborted through Cancel.
func (a *AWSSession) DownloadCompressedDirectory(s3FilePath string, localRootDirectory string) error {
	format := archiver.CompressedArchive{
		Compression: archiver.Zstd{},
		Archival:    archiver.Tar{},
	}

	in, out := io.Pipe()

	// Buffered so the extracting goroutine can always deliver its result,
	// even when the download fails and this function returns early.
	errorChannel := make(chan error, 1)

	go func() {
		err := format.Extract(context.Background(), in, nil, func(ctx context.Context, f archiver.File) error {
			paths := strings.Split(f.NameInArchive, "/")
			// Entries look like "/top/...", so at least two components are
			// required before the paths[1] write below. (The previous
			// len == 0 check could never trigger: strings.Split always
			// returns at least one element.)
			if len(paths) < 2 {
				return fmt.Errorf("incorrect path")
			}
			// Replace top directory in the archive with local path
			paths[1] = localRootDirectory
			localFile := filepath.Join(paths...)

			if f.IsDir() {
				log.Println("Creating directory:", localFile)
				if !Exists(localFile) {
					// MkdirAll also creates missing parents, in case the
					// archive does not list directories strictly top-down.
					return os.MkdirAll(localFile, f.Mode().Perm())
				}
				return nil
			}

			inFile, err := f.Open()
			if err != nil {
				return err
			}
			if inFile == nil {
				// We are ignoring link for now
				return nil
			}
			defer inFile.Close()

			log.Println("Creating file", localFile)
			outFile, err := os.Create(localFile)
			if err != nil {
				return err
			}
			defer outFile.Close()
			_, err = io.Copy(outFile, inFile)

			return err
		})
		in.Close()

		errorChannel <- err
	}()

	downloader := s3manager.NewDownloader(a.s)
	// Chunks must arrive in order: fakeWriterAt ignores offsets.
	downloader.Concurrency = 1

	a.m.Lock()
	ctxt, cancel := context.WithCancel(context.Background())
	a.cancel = cancel
	a.m.Unlock()
	defer a.Cancel()

	_, err := downloader.DownloadWithContext(ctxt, fakeWriterAt{out}, &s3.GetObjectInput{
		Bucket: aws.String(a.bucket),
		Key:    aws.String(s3FilePath),
	})
	// Propagate a download failure to the extractor instead of a clean EOF,
	// then always drain errorChannel so the goroutine's send cannot block
	// forever (the old code returned early on error and leaked it).
	out.CloseWithError(err)
	if extractErr := <-errorChannel; err == nil {
		err = extractErr
	}
	return err
}

// Cancel aborts the in-flight S3 transfer, if any, and resets the stored
// cancellation function to a no-op so repeated calls are always safe.
func (a *AWSSession) Cancel() {
	a.m.Lock()
	current := a.cancel
	a.cancel = func() {}
	a.m.Unlock()

	current()
}

// GetBucket returns the name of the S3 bucket this session operates on.
func (a *AWSSession) GetBucket() string {
	return a.bucket
}

type fakeWriterAt struct {
w io.Writer
}

func (fw fakeWriterAt) WriteAt(p []byte, offset int64) (n int, err error) {
// ignore 'offset' because we forced sequential downloads
return fw.w.Write(p)
}
Loading