
Commit

Merge branch 'main' into dependabot/github_actions/actions/setup-go-5
elgohr authored Dec 26, 2023
2 parents 13c6353 + c097897 commit 5aac2dd
Showing 8 changed files with 127 additions and 65 deletions.
18 changes: 17 additions & 1 deletion .github/workflows/test.yml
@@ -10,17 +10,31 @@ jobs:
with:
go-version: '^1.21'
- name: Test
run: go test -race -timeout 30m -coverprofile=coverage.txt -covermode=atomic ./...
run: go test -race -timeout 0 -coverprofile=coverage.txt -covermode=atomic ./...
- name: Coverage
uses: codecov/codecov-action@v3
with:
file: coverage.txt

linter:
runs-on: ubuntu-latest
permissions:
contents: read
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: '^1.21'
- uses: golangci/golangci-lint-action@v3

advanced-security:
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
@@ -31,9 +45,11 @@ jobs:
languages: 'go'
- uses: github/codeql-action/autobuild@v3
- uses: github/codeql-action/analyze@v3

release:
needs:
- test
- linter
- advanced-security
runs-on: ubuntu-latest
timeout-minutes: 5
10 changes: 9 additions & 1 deletion makefile → Makefile
@@ -1,7 +1,15 @@
all: # nothing - to speed up advanced security scan

test:
go test -race ./...
go test -race -timeout 0 ./...

update-linting-tools:
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
go install golang.org/x/vuln/cmd/govulncheck@latest

lint: update-linting-tools
golangci-lint run -v ./...
govulncheck ./...

update-dependencies:
go get -u ./...
1 change: 1 addition & 0 deletions exampleV2_test.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//nolint:all
package localstack_test

import (
1 change: 1 addition & 0 deletions example_test.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//nolint:all
package localstack_test

import (
21 changes: 21 additions & 0 deletions golangci.yml
@@ -0,0 +1,21 @@
linters:
enable:
- bidichk
- bodyclose
- containedctx
- gosec
- durationcheck
- errchkjson
- exportloopref
- errorlint
- gocritic
- godox
- noctx

run:
timeout: 10m
allow-parallel-runners: true

linters-settings:
staticcheck:
go: "1.21"
70 changes: 33 additions & 37 deletions localstack.go
@@ -279,6 +279,9 @@ func (i *Instance) start(ctx context.Context, services ...Service) error {
const imageName = "go-localstack"

func (i *Instance) startLocalstack(ctx context.Context, services ...Service) error {
i.containerIdMutex.Lock()
defer i.containerIdMutex.Unlock()

if err := i.buildLocalImage(ctx); err != nil {
return fmt.Errorf("localstack: could not build image: %w", err)
}
@@ -319,19 +322,18 @@ func (i *Instance) startLocalstack(ctx context.Context, services ...Service) err
return fmt.Errorf("localstack: could not create container: %w", err)
}

i.setContainerId(resp.ID)
i.containerId = resp.ID

i.log.Info("starting localstack")
containerId := resp.ID
if err := i.cli.ContainerStart(ctx, containerId, types.ContainerStartOptions{}); err != nil {
if err := i.cli.ContainerStart(ctx, i.containerId, types.ContainerStartOptions{}); err != nil {
return fmt.Errorf("localstack: could not start container: %w", err)
}

if i.log.Level == logrus.DebugLevel {
go i.writeContainerLogToLogger(ctx, containerId)
go i.writeContainerLogToLogger(ctx, i.containerId)
}

return i.mapPorts(ctx, services, containerId, 0)
return i.mapPorts(ctx, services, i.containerId, 0)
}

//go:embed Dockerfile
@@ -387,39 +389,41 @@ func (i *Instance) mapPorts(ctx context.Context, services []Service, containerId
time.Sleep(300 * time.Millisecond)
return i.mapPorts(ctx, services, containerId, try+1)
}
i.portMappingMutex.Lock()
defer i.portMappingMutex.Unlock()
i.portMapping[FixedPort] = "localhost:" + bindings[0].HostPort
i.savePortMappings(map[Service]string{
FixedPort: "localhost:" + bindings[0].HostPort,
})
} else {
hasFilteredServices := len(services) > 0
i.portMappingMutex.Lock()
defer i.portMappingMutex.Unlock()
newMapping := make(map[Service]string, len(AvailableServices))
for service := range AvailableServices {
bindings := ports[nat.Port(service.Port)]
if len(bindings) == 0 {
time.Sleep(300 * time.Millisecond)
return i.mapPorts(ctx, services, containerId, try+1)
}
if hasFilteredServices && containsService(services, service) {
i.portMapping[service] = "localhost:" + bindings[0].HostPort
newMapping[service] = "localhost:" + bindings[0].HostPort
} else if !hasFilteredServices {
i.portMapping[service] = "localhost:" + bindings[0].HostPort
newMapping[service] = "localhost:" + bindings[0].HostPort
}
}
i.savePortMappings(newMapping)
}
return nil
}

func (i *Instance) stop() error {
containerId := i.getContainerId()
if containerId == "" {
i.containerIdMutex.Lock()
defer i.containerIdMutex.Unlock()
if i.containerId == "" {
return nil
}
timeout := int(time.Second.Seconds())
if err := i.cli.ContainerStop(context.Background(), containerId, container.StopOptions{Timeout: &timeout}); err != nil {
if err := i.cli.ContainerStop(context.Background(), i.containerId, container.StopOptions{
Signal: "SIGKILL",
}); err != nil {
return err
}
i.setContainerId("")
i.containerId = ""
i.resetPortMapping()
return nil
}
@@ -432,7 +436,7 @@ func (i *Instance) waitToBeAvailable(ctx context.Context) error {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
if err := i.isRunning(ctx, 0); err != nil {
if err := i.isRunning(ctx); err != nil {
return err
}
if err := i.checkAvailable(ctx); err != nil {
@@ -445,21 +449,15 @@
}
}

func (i *Instance) isRunning(ctx context.Context, try int) error {
if try > 10 {
return errors.New("localstack container has been stopped")
}
containers, err := i.cli.ContainerList(ctx, types.ContainerListOptions{})
func (i *Instance) isRunning(ctx context.Context) error {
i.containerIdMutex.RLock()
defer i.containerIdMutex.RUnlock()
_, err := i.cli.ContainerInspect(ctx, i.containerId)
if err != nil {
return err
}
for _, c := range containers {
if c.Image == imageName {
return nil
}
i.log.Debug(err)
return errors.New("localstack container has been stopped")
}
time.Sleep(300 * time.Millisecond)
return i.isRunning(ctx, try+1)
return nil
}

func (i *Instance) checkAvailable(ctx context.Context) error {
@@ -512,16 +510,14 @@ func (i *Instance) getContainerId() string {
return i.containerId
}

func (i *Instance) setContainerId(v string) {
i.containerIdMutex.Lock()
defer i.containerIdMutex.Unlock()
i.containerId = v
func (i *Instance) resetPortMapping() {
i.savePortMappings(map[Service]string{})
}

func (i *Instance) resetPortMapping() {
func (i *Instance) savePortMappings(newMapping map[Service]string) {
i.portMappingMutex.Lock()
defer i.portMappingMutex.Unlock()
i.portMapping = map[Service]string{}
i.portMapping = newMapping
}

func (i *Instance) getPortMapping(service Service) string {
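
A minimal, self-contained sketch of the concurrency pattern the localstack.go changes move toward: build the complete mapping outside the lock and store it with a single savePortMappings call, and guard the container id with its own mutex for the whole start/stop operation. The instance type and field names here are simplified stand-ins for illustration, not the library's exported API.

package main

import (
	"fmt"
	"sync"
)

// Service stands in for the service key type used in localstack.go.
type Service string

// instance is a simplified stand-in for Instance, keeping only the fields
// relevant to the locking pattern shown in the diff above.
type instance struct {
	containerIdMutex sync.RWMutex
	containerId      string

	portMappingMutex sync.RWMutex
	portMapping      map[Service]string
}

// savePortMappings swaps in a complete mapping under one lock acquisition,
// mirroring how mapPorts now builds newMapping first and stores it once.
func (i *instance) savePortMappings(newMapping map[Service]string) {
	i.portMappingMutex.Lock()
	defer i.portMappingMutex.Unlock()
	i.portMapping = newMapping
}

func (i *instance) getPortMapping(s Service) string {
	i.portMappingMutex.RLock()
	defer i.portMappingMutex.RUnlock()
	return i.portMapping[s]
}

// stop clears the container id while holding the write lock for the whole
// operation, as the reworked stop() in the diff does.
func (i *instance) stop() {
	i.containerIdMutex.Lock()
	defer i.containerIdMutex.Unlock()
	if i.containerId == "" {
		return
	}
	i.containerId = ""
	i.savePortMappings(map[Service]string{})
}

func main() {
	i := &instance{containerId: "abc123"}
	i.savePortMappings(map[Service]string{"s3": "localhost:32768"})
	fmt.Println(i.getPortMapping("s3"))
	i.stop()
	fmt.Println(i.getPortMapping("s3") == "")
}
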
19 changes: 13 additions & 6 deletions localstack_internal_test.go
@@ -31,7 +31,8 @@ import (
)

func TestInstance_Start_Fails(t *testing.T) {
for _, tt := range [...]struct {
t.Parallel()
for _, scenario := range [...]struct {
when string
given func(f *internalfakes.FakeDockerClient) *Instance
then func(t *testing.T, err error, f *internalfakes.FakeDockerClient)
@@ -108,7 +109,7 @@ func TestInstance_Start_Fails(t *testing.T) {
AttachStdout: true,
AttachStderr: true,
}, config)
pm := nat.PortMap{}
pm := make(nat.PortMap, len(AvailableServices))
for service := range AvailableServices {
pm[nat.Port(service.Port)] = []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: ""}}
}
@@ -165,15 +166,18 @@ func TestInstance_Start_Fails(t *testing.T) {
},
},
} {
t.Run(tt.when, func(t *testing.T) {
s := scenario
t.Run(s.when, func(t *testing.T) {
t.Parallel()
f := &internalfakes.FakeDockerClient{}
f.ContainerLogsReturns(io.NopCloser(strings.NewReader("")), nil)
tt.then(t, tt.given(f).Start(), f)
s.then(t, s.given(f).Start(), f)
})
}
}

func TestInstance_StartWithContext_Fails_Stop_AfterTest(t *testing.T) {
t.Parallel()
f := &internalfakes.FakeDockerClient{}
ctx, cancel := context.WithCancel(context.Background())
cancel()
@@ -183,14 +187,16 @@ func TestInstance_StartWithContext_Fails_Stop_AfterTest(t *testing.T) {
}

func TestInstance_Stop_Fails(t *testing.T) {
t.Parallel()
f := &internalfakes.FakeDockerClient{}
f.ContainerStopReturns(errors.New("can't stop"))
i := &Instance{cli: f, log: logrus.StandardLogger(), containerId: "something"}
require.EqualError(t, i.Stop(), "can't stop")
}

func TestInstance_checkAvailable_Session_Fails(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
require.NoError(t, os.Setenv("AWS_STS_REGIONAL_ENDPOINTS", "FAILURE"))
defer func() {
@@ -201,7 +207,8 @@ func TestInstance_checkAvailable_Session_Fails(t *testing.T) {
}

func TestInstance_waitToBeAvailable_Context_Expired(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
cancel()
i := &Instance{log: logrus.StandardLogger()}
require.Error(t, i.waitToBeAvailable(ctx))
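
A small sketch of the parallel table-test pattern adopted above: copy the loop variable (s := scenario) before starting the parallel subtest so each closure captures its own value under Go 1.21 loop semantics. The test name and fields below are made up for illustration.

package example

import "testing"

func TestScenarios(t *testing.T) {
	t.Parallel()
	for _, scenario := range []struct {
		name string
		in   int
		want int
	}{
		{name: "zero", in: 0, want: 0},
		{name: "double", in: 2, want: 4},
	} {
		s := scenario // copy before t.Parallel so the closure does not see a reused loop variable
		t.Run(s.name, func(t *testing.T) {
			t.Parallel()
			if got := s.in * 2; got != s.want {
				t.Errorf("doubling %d: got %d, want %d", s.in, got, s.want)
			}
		})
	}
}
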