Add exponential backoff to agent for fetching new tasks #3206

Closed
wants to merge 9 commits
20 changes: 16 additions & 4 deletions agent/rpc/client_grpc.go
@@ -34,16 +34,28 @@ import (
 // set grpc version on compile time to compare against server version response
 const ClientGrpcVersion int32 = proto.Version
 
+// config for exponential backoff on grpc errors
+const (
+	backOffInit = 10 * time.Millisecond
+	backOffMax  = 10 * time.Second
+)
+
 type client struct {
-	client proto.WoodpeckerClient
-	conn   *grpc.ClientConn
+	client              proto.WoodpeckerClient
+	conn                *grpc.ClientConn
+	pollMaxInterval     time.Duration
+	pollInitialInterval time.Duration
 }
 
 // NewGrpcClient returns a new grpc Client.
 func NewGrpcClient(conn *grpc.ClientConn) rpc.Peer {
 	client := new(client)
 	client.client = proto.NewWoodpeckerClient(conn)
 	client.conn = conn
+
+	client.pollInitialInterval = backOffInit
+	client.pollMaxInterval = backOffMax
+
 	return client
 }
 
@@ -53,8 +65,8 @@ func (c *client) Close() error {
 
 func (c *client) newBackOff() backoff.BackOff {
 	b := backoff.NewExponentialBackOff()
-	b.MaxInterval = 10 * time.Second
-	b.InitialInterval = 10 * time.Millisecond
+	b.MaxInterval = c.pollMaxInterval
+	b.InitialInterval = c.pollInitialInterval
 	return b
 }
 
63 changes: 49 additions & 14 deletions agent/runner.go
@@ -22,8 +22,8 @@ import (
"sync"
"time"

"github.com/cenkalti/backoff/v4"
"github.com/rs/zerolog/log"
"github.com/tevino/abool/v2"
"google.golang.org/grpc/metadata"

"go.woodpecker-ci.org/woodpecker/v2/pipeline"
@@ -32,12 +32,24 @@ import (
 	"go.woodpecker-ci.org/woodpecker/v2/shared/utils"
 )
 
+// config for exponential backoff on next task fetch
+const (
+	backOffInit = 10 * time.Millisecond
+	backOffMax  = 10 * time.Second
+)
+
 type Runner struct {
 	client   rpc.Peer
 	filter   rpc.Filter
 	hostname string
 	counter  *State
 	backend  *backend.Backend
+	backOff  backOff
 }
 
+type backOff struct {
+	pollMaxInterval     time.Duration
+	pollInitialInterval time.Duration
+}
+
 func NewRunner(workEngine rpc.Peer, f rpc.Filter, h string, state *State, backend *backend.Backend) Runner {
@@ -47,10 +59,37 @@ func NewRunner(workEngine rpc.Peer, f rpc.Filter, h string, state *State, backend *backend.Backend) Runner {
 		hostname: h,
 		counter:  state,
 		backend:  backend,
+		backOff: backOff{
+			pollInitialInterval: backOffInit,
+			pollMaxInterval:     backOffMax,
+		},
 	}
 }
 
 func (r *Runner) Run(runnerCtx context.Context) error {
+	retry := backoff.NewExponentialBackOff()
Review comment (Member): Why do you want to back off exponentially instead of linearly here?

Reply (Member, author): Because if you fetched a task last time, it is more likely that there are more to come. Otherwise, in the worst case, you wait 10s.

A sketch of how these intervals grow and reset follows this hunk.

+	retry.MaxInterval = r.backOff.pollMaxInterval
+	retry.InitialInterval = r.backOff.pollInitialInterval
+
+	for {
+		select {
+		case <-runnerCtx.Done():
+			return runnerCtx.Err()
+		case <-time.After(retry.NextBackOff()):
+			log.Debug().Msg("polling new steps")
+			gotTask, err := r.runNextTask(runnerCtx)
+			if err != nil {
+				return err
+			}
+			if gotTask {
+				log.Debug().Msg("reset retry time, last task fetch got me work")
+				retry.Reset()
+			}
+		}
+	}
+}
+
+func (r *Runner) runNextTask(runnerCtx context.Context) (bool, error) {
 	log.Debug().Msg("request next execution")
 
 	meta, _ := metadata.FromOutgoingContext(runnerCtx)
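To make the review exchange above about exponential vs. linear backoff concrete, here is a minimal, standalone sketch (not part of this PR) of how the cenkalti/backoff/v4 timer behaves with the intervals configured in Run: waits grow from roughly 10ms toward the 10s cap while no task is returned, and Reset() drops them back to the initial interval after a successful fetch. Setting MaxElapsedTime to 0 is a choice for this demo so NextBackOff never returns backoff.Stop; the PR itself leaves the library default.

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 10 * time.Millisecond // backOffInit in the PR
	b.MaxInterval = 10 * time.Second          // backOffMax in the PR
	b.MaxElapsedTime = 0                      // demo only: never return backoff.Stop

	// While no task is fetched, successive waits grow by the default
	// multiplier (1.5, with random jitter) until capped at MaxInterval.
	for i := 0; i < 6; i++ {
		fmt.Println(b.NextBackOff())
	}

	// After a fetch that returned work, the runner calls Reset(), so the
	// next idle wait starts again near InitialInterval instead of 10s.
	b.Reset()
	fmt.Println(b.NextBackOff())
}
```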
@@ -59,10 +98,10 @@ func (r *Runner) Run(runnerCtx context.Context) error {
 	// get the next workflow from the queue
 	work, err := r.client.Next(runnerCtx, r.filter)
Review comment (anbraten, Member, Jan 15, 2024): The server blocks Next requests until it has a job, so there should be no need to back off here.

Reply (Member, author): Well, it does not ...

 	if err != nil {
-		return err
+		return false, err
 	}
 	if work == nil {
-		return nil
+		return false, nil
 	}
 
 	timeout := time.Hour
@@ -89,8 +128,8 @@ func (r *Runner) Run(runnerCtx context.Context) error {
 
 	logger.Debug().Msg("received execution")
 
-	workflowCtx, cancel := context.WithTimeout(ctxmeta, timeout)
-	defer cancel()
+	workflowCtx, cancelWorkflowCtx := context.WithTimeout(ctxmeta, timeout)
+	defer cancelWorkflowCtx()
 
 	// Add sigterm support for internal context.
 	// Required when the pipeline is terminated by external signals
@@ -99,15 +138,11 @@ func (r *Runner) Run(runnerCtx context.Context) error {
 		logger.Error().Msg("Received sigterm termination signal")
 	})
 
-	canceled := abool.New()
 	go func() {
 		logger.Debug().Msg("listen for cancel signal")
 
 		if werr := r.client.Wait(workflowCtx, work.ID); werr != nil {
-			canceled.SetTo(true)
 			logger.Warn().Err(werr).Msg("cancel signal received")
 
-			cancel()
+			cancelWorkflowCtx()
 		} else {
 			logger.Debug().Msg("stop listening for cancel signal")
 		}
@@ -156,7 +191,7 @@ func (r *Runner) Run(runnerCtx context.Context) error {
 	state.Finished = time.Now().Unix()
 	state.Exited = true
 
-	if canceled.IsSet() {
+	if workflowCtx.Err() != nil {
 		state.Error = ""
 		state.ExitCode = 137
 	} else if err != nil {
@@ -167,7 +202,7 @@ func (r *Runner) Run(runnerCtx context.Context) error {
 		case errors.Is(err, pipeline.ErrCancel):
 			state.Error = ""
 			state.ExitCode = 137
-			canceled.SetTo(true)
+			cancelWorkflowCtx()
 		default:
 			state.ExitCode = 1
 			state.Error = err.Error()
@@ -177,7 +212,7 @@ func (r *Runner) Run(runnerCtx context.Context) error {
 	logger.Debug().
 		Str("error", state.Error).
 		Int("exit_code", state.ExitCode).
-		Bool("canceled", canceled.IsSet()).
+		Bool("canceled", workflowCtx.Err() != nil).
 		Msg("pipeline complete")
 
 	logger.Debug().Msg("uploading logs")
@@ -195,7 +230,7 @@ func (r *Runner) Run(runnerCtx context.Context) error {
 		logger.Debug().Msg("updating pipeline status complete")
 	}
 
-	return nil
+	return true, nil
 }
 
 // extract repository name from the configuration
41 changes: 19 additions & 22 deletions cmd/agent/agent.go
@@ -49,6 +49,8 @@ import (
 	"go.woodpecker-ci.org/woodpecker/v2/version"
 )
 
+const healthReportTimes = time.Second * 10
+
 func run(c *cli.Context) error {
 	agentConfigPath := c.String("agent-config")
 	hostname := c.String("hostname")
@@ -113,7 +115,6 @@ func run(c *cli.Context) error {
 
 	client := agentRpc.NewGrpcClient(conn)
 
-	sigterm := abool.New()
 	ctx := metadata.NewOutgoingContext(
 		context.Background(),
 		metadata.Pairs("hostname", hostname),
@@ -122,7 +123,6 @@
 	agentConfigPersisted := abool.New()
 	ctx = utils.WithContextSigtermCallback(ctx, func() {
 		log.Info().Msg("termination signal is received, shutting down")
-		sigterm.Set()
 
 		// Remove stateless agents from server
 		if agentConfigPersisted.IsNotSet() {
@@ -203,18 +203,23 @@
 
 	go func() {
 		for {
-			if sigterm.IsSet() {
-				log.Debug().Msg("terminating health reporting")
+			select {
+			case <-ctx.Done():
+				if err := ctx.Err(); err != nil && !errors.Is(err, &utils.ErrSignalReceived{}) {
+					log.Error().Err(err).Msgf("context closed with unexpected error")
+				} else {
+					log.Debug().Msg("terminating health reporting")
+				}
 				return
-			}
 
-			err := client.ReportHealth(ctx)
-			if err != nil {
-				log.Err(err).Msg("failed to report health")
-				return
-			}
+			default:
+				if err := client.ReportHealth(ctx); err != nil {
+					log.Err(err).Msg("failed to report health")
+					return
+				}
 
-			<-time.After(time.Second * 10)
+				<-time.After(healthReportTimes)
+			}
 		}
 	}()
 
@@ -226,17 +231,9 @@
 			r := agent.NewRunner(client, filter, hostname, counter, &backendEngine)
 			log.Debug().Msgf("created new runner %d", i)
 
-			for {
-				if sigterm.IsSet() {
-					log.Debug().Msgf("terminating runner %d", i)
-					return
-				}
-
-				log.Debug().Msg("polling new steps")
-				if err := r.Run(ctx); err != nil {
-					log.Error().Err(err).Msg("pipeline done with error")
-					return
-				}
+			if err := r.Run(ctx); err != nil && !errors.Is(err, &utils.ErrSignalReceived{}) {
+				log.Error().Err(err).Msg("runner done with error")
+				return
 			}
 		}()
 	}
15 changes: 14 additions & 1 deletion shared/utils/context.go
@@ -22,6 +22,19 @@ import (
 	"syscall"
 )
 
+type ErrSignalReceived struct {
+	signal string
+}
+
+func (err *ErrSignalReceived) Error() string {
+	return fmt.Sprintf("received signal: %s", err.signal)
+}
+
+func (*ErrSignalReceived) Is(target error) bool {
+	_, ok := target.(*ErrSignalReceived) //nolint:errorlint
+	return ok
+}
+
 // Returns a copy of parent context that is canceled when
 // an os interrupt signal is received.
 func WithContextSigtermCallback(ctx context.Context, f func()) context.Context {
@@ -37,7 +50,7 @@ func WithContextSigtermCallback(ctx context.Context, f func()) context.Context {
 			if f != nil {
 				f()
 			}
-			cancel(fmt.Errorf("received signal: %v", receivedSignal))
+			cancel(&ErrSignalReceived{signal: fmt.Sprint(receivedSignal)})
 		}
 	}()
 
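As a minimal, standalone sketch (not part of the PR) of what the custom Is method above provides: errors.Is matches any ErrSignalReceived purely by its concrete type, regardless of which signal the error carries, which is what lets cmd/agent/agent.go probe with an empty &utils.ErrSignalReceived{}.

```go
package main

import (
	"errors"
	"fmt"
)

type ErrSignalReceived struct{ signal string }

func (err *ErrSignalReceived) Error() string {
	return fmt.Sprintf("received signal: %s", err.signal)
}

// errors.Is calls this method while unwrapping; matching on the concrete
// type alone means the probe value does not need to name a signal.
func (*ErrSignalReceived) Is(target error) bool {
	_, ok := target.(*ErrSignalReceived)
	return ok
}

func main() {
	err := fmt.Errorf("shutting down: %w", &ErrSignalReceived{signal: "terminated"})

	fmt.Println(errors.Is(err, &ErrSignalReceived{})) // true, despite the empty signal
	fmt.Println(errors.Is(err, errors.New("other")))  // false
}
```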