diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..b44bb07e4 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# Code of Conduct + +Refer to our [Code of Conduct](https://github.com/tinkerbell/.github/blob/master/CODE_OF_CONDUCT.md) diff --git a/OWNERS.md b/OWNERS.md new file mode 100644 index 000000000..c37a2a0b8 --- /dev/null +++ b/OWNERS.md @@ -0,0 +1,3 @@ +# OWNERS + +Please see [https://github.com/tinkerbell/.github/blob/master/OWNERS.md](https://github.com/tinkerbell/.github/blob/master/OWNERS.md). diff --git a/README.md b/README.md index 52a6692ad..9a3987779 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,9 @@ [![Build Status](https://cloud.drone.io/api/badges/tinkerbell/tink/status.svg)](https://cloud.drone.io/tinkerbell/tink) [![codecov](https://codecov.io/gh/tinkerbell/tink/branch/master/graph/badge.svg)](https://codecov.io/gh/tinkerbell/tink) +![](https://img.shields.io/badge/Stability-Experimental-red.svg) + +This repository is [Experimental](https://github.com/packethost/standards/blob/master/experimental-statement.md) meaning that it's based on untested ideas or techniques and not yet established or finalized or involves a radically new and innovative style! This means that support is best effort (at best!) and we strongly encourage you to NOT use this in production. 
It is comprised of following five major components: diff --git a/cmd/tink-cli/cmd/template/create.go b/cmd/tink-cli/cmd/template/create.go index 33c0075cb..12e3c03e1 100644 --- a/cmd/tink-cli/cmd/template/create.go +++ b/cmd/tink-cli/cmd/template/create.go @@ -43,7 +43,7 @@ cat /tmp/example.tmpl | tink template create -n example`, } else { f, err := os.Open(filePath) if err != nil { - log.Println(err) + log.Fatal(err) } reader = f } @@ -51,8 +51,7 @@ cat /tmp/example.tmpl | tink template create -n example`, data := readAll(reader) if data != nil { if err := tryParseTemplate(string(data)); err != nil { - log.Println(err) - return + log.Fatal(err) } createTemplate(data) } @@ -62,7 +61,7 @@ cat /tmp/example.tmpl | tink template create -n example`, func readAll(reader io.Reader) []byte { data, err := ioutil.ReadAll(reader) if err != nil { - log.Println(err) + log.Fatal(err) } return data } @@ -86,8 +85,7 @@ func createTemplate(data []byte) { req := template.WorkflowTemplate{Name: templateName, Data: string(data)} res, err := client.TemplateClient.CreateTemplate(context.Background(), &req) if err != nil { - log.Println(err) - return + log.Fatal(err) } fmt.Println("Created Template: ", res.Id) } diff --git a/cmd/tink-cli/cmd/template/update.go b/cmd/tink-cli/cmd/template/update.go index e0b70f1be..1e07b9735 100644 --- a/cmd/tink-cli/cmd/template/update.go +++ b/cmd/tink-cli/cmd/template/update.go @@ -52,8 +52,7 @@ func updateTemplate(id string) { data := readTemplateData() if data != "" { if err := tryParseTemplate(data); err != nil { - log.Println(err) - return + log.Fatal(err) } req.Data = data } @@ -72,13 +71,13 @@ func updateTemplate(id string) { func readTemplateData() string { f, err := os.Open(filePath) if err != nil { - log.Println(err) + log.Fatal(err) } defer f.Close() data, err := ioutil.ReadAll(f) if err != nil { - log.Println(err) + log.Fatal(err) } return string(data) } diff --git a/cmd/tink-server/main.go b/cmd/tink-server/main.go index 
3ca24430b..20db5a252 100644 --- a/cmd/tink-server/main.go +++ b/cmd/tink-server/main.go @@ -19,14 +19,13 @@ var ( ) func main() { - log, cleanup, err := log.Init("github.com/tinkerbell/tink") + log, err := log.Init("github.com/tinkerbell/tink") if err != nil { panic(err) } logger = log - defer cleanup() - + defer logger.Close() log.Info("starting version " + version) ctx, closer := context.WithCancel(context.Background()) diff --git a/cmd/tink-worker/action.go b/cmd/tink-worker/action.go index 014bf884d..bdb2460f9 100644 --- a/cmd/tink-worker/action.go +++ b/cmd/tink-worker/action.go @@ -8,33 +8,42 @@ import ( "fmt" "io" "os" - "strings" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" + "github.com/packethost/pkg/log" "github.com/pkg/errors" - "github.com/sirupsen/logrus" pb "github.com/tinkerbell/tink/protos/workflow" ) var ( registry string cli *client.Client - log *logrus.Entry ) -func executeAction(ctx context.Context, action *pb.WorkflowAction, wfID string) (string, pb.ActionState, error) { - log = logger.WithFields(logrus.Fields{"workflow_id": wfID, "worker_id": action.GetWorkerId()}) +const ( + errCreateContainer = "failed to create container" + errRemoveContainer = "failed to remove container" + errFailedToWait = "failed to wait for completion of action" + errFailedToRunCmd = "failed to run on-timeout command" + + infoWaitFinished = "wait finished for failed or timeout container" +) + +func executeAction(ctx context.Context, action *pb.WorkflowAction, wfID string) (pb.ActionState, error) { + l := logger.With("workflowID", wfID, "workerID", action.GetWorkerId(), "actionName", action.GetName(), "actionImage", action.GetImage()) err := pullActionImage(ctx, action) if err != nil { - return fmt.Sprintf("Failed to pull Image : %s", action.GetImage()), 1, errors.Wrap(err, "DOCKER PULL") + return pb.ActionState_ACTION_IN_PROGRESS, errors.Wrap(err, "DOCKER PULL") 
} - id, err := createContainer(ctx, action, action.Command, wfID) + id, err := createContainer(ctx, l, action, action.Command, wfID) if err != nil { - return "Failed to create container", 1, errors.Wrap(err, "DOCKER CREATE") + return pb.ActionState_ACTION_IN_PROGRESS, errors.Wrap(err, "DOCKER CREATE") } + l.With("containerID", id, "command", action.GetOnTimeout()).Info("container created") + var timeCtx context.Context var cancel context.CancelFunc if action.Timeout > 0 { @@ -43,11 +52,9 @@ func executeAction(ctx context.Context, action *pb.WorkflowAction, wfID string) timeCtx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) } defer cancel() - //run container with timeout context - //startedAt := time.Now() - err = runContainer(timeCtx, id) + err = startContainer(timeCtx, l, id) if err != nil { - return "Failed to run container", 1, errors.Wrap(err, "DOCKER RUN") + return pb.ActionState_ACTION_IN_PROGRESS, errors.Wrap(err, "DOCKER RUN") } failedActionStatus := make(chan pb.ActionState) @@ -55,67 +62,69 @@ func executeAction(ctx context.Context, action *pb.WorkflowAction, wfID string) //capturing logs of action container in a go-routine go captureLogs(ctx, id) - status, err, werr := waitContainer(timeCtx, id) - if werr != nil { - rerr := removeContainer(ctx, id) + status, err := waitContainer(timeCtx, id) + if err != nil { + rerr := removeContainer(ctx, l, id) if rerr != nil { - log.WithField("container_id", id).Errorln("Failed to remove container as ", rerr) + rerr = errors.Wrap(rerr, errRemoveContainer) + l.With("containerID", id).Error(rerr) + return status, rerr } - return "Failed to wait for completion of action", status, errors.Wrap(err, "DOCKER_WAIT") + return status, errors.Wrap(err, "DOCKER_WAIT") } - rerr := removeContainer(ctx, id) + rerr := removeContainer(ctx, l, id) if rerr != nil { - return "Failed to remove container of action", status, errors.Wrap(rerr, "DOCKER_REMOVE") + return status, errors.Wrap(rerr, "DOCKER_REMOVE") } - 
log.Infoln("Container removed with Status ", pb.ActionState(status)) + l.With("status", status.String()).Info("container removed") if status != pb.ActionState_ACTION_SUCCESS { if status == pb.ActionState_ACTION_TIMEOUT && action.OnTimeout != nil { - id, err = createContainer(ctx, action, action.OnTimeout, wfID) + id, err = createContainer(ctx, l, action, action.OnTimeout, wfID) if err != nil { - log.Errorln("Failed to create container for on-timeout command: ", err) + l.Error(errors.Wrap(err, errCreateContainer)) } - log.Infoln("Container created with on-timeout command : ", action.OnTimeout) + l.With("containerID", id, "status", status.String(), "command", action.GetOnTimeout()).Info("container created") failedActionStatus := make(chan pb.ActionState) go captureLogs(ctx, id) go waitFailedContainer(ctx, id, failedActionStatus) - err = runContainer(ctx, id) + err = startContainer(ctx, l, id) if err != nil { - log.Errorln("Failed to run on-timeout command: ", err) + l.Error(errors.Wrap(err, errFailedToRunCmd)) } onTimeoutStatus := <-failedActionStatus - log.Infoln("On-Timeout Container status : ", onTimeoutStatus) + l.With("status", onTimeoutStatus).Info("action timeout") } else { if action.OnFailure != nil { - id, err = createContainer(ctx, action, action.OnFailure, wfID) + id, err = createContainer(ctx, l, action, action.OnFailure, wfID) if err != nil { - log.Errorln("Failed to create on-failure command: ", err) + l.Error(errors.Wrap(err, errCreateContainer)) } - log.Infoln("Container created with on-failure command : ", action.OnFailure) + l.With("containerID", id, "actionStatus", status.String(), "command", action.GetOnFailure()).Info("container created") go captureLogs(ctx, id) go waitFailedContainer(ctx, id, failedActionStatus) - err = runContainer(ctx, id) + err = startContainer(ctx, l, id) if err != nil { - log.Errorln("Failed to run on-failure command: ", err) + l.Error(errors.Wrap(err, errFailedToRunCmd)) } onFailureStatus := <-failedActionStatus - 
log.Infoln("on-failure Container status : ", onFailureStatus) + l.With("status", onFailureStatus).Info("action failed") } } - log.Infoln("Wait finished for failed or timeout container") + l.Info(infoWaitFinished) if err != nil { - rerr := removeContainer(ctx, id) + rerr := removeContainer(ctx, l, id) if rerr != nil { - log.Errorln("Failed to remove container as ", rerr) + l.Error(errors.Wrap(rerr, errRemoveContainer)) } - log.Infoln("Failed to wait for container : ", err) + l.Error(errors.Wrap(err, errFailedToWait)) } - rerr = removeContainer(ctx, id) + rerr = removeContainer(ctx, l, id) if rerr != nil { - log.Errorln("Failed to remove container as ", rerr) + l.Error(errors.Wrap(rerr, errRemoveContainer)) } } - log.Infoln("Action container exits with status code ", status) - return "Successful Execution", status, nil + l.With("status", status).Info("action container exited") + return status, nil } func captureLogs(ctx context.Context, id string) { @@ -165,7 +174,7 @@ func pullActionImage(ctx context.Context, action *pb.WorkflowAction) error { return nil } -func createContainer(ctx context.Context, action *pb.WorkflowAction, cmd []string, wfID string) (string, error) { +func createContainer(ctx context.Context, l log.Logger, action *pb.WorkflowAction, cmd []string, wfID string) (string, error) { config := &container.Config{ Image: registry + "/" + action.GetImage(), AttachStdout: true, @@ -173,7 +182,6 @@ func createContainer(ctx context.Context, action *pb.WorkflowAction, cmd []strin Tty: true, Env: action.GetEnvironment(), } - if cmd != nil { config.Cmd = cmd } @@ -184,8 +192,7 @@ func createContainer(ctx context.Context, action *pb.WorkflowAction, cmd []strin Binds: []string{wfDir + ":/workflow"}, } hostConfig.Binds = append(hostConfig.Binds, action.GetVolumes()...) 
- - log.Infoln("Starting the container with cmd", cmd) + l.With("command", cmd).Info("creating container") resp, err := cli.ContainerCreate(ctx, config, hostConfig, nil, action.GetName()) if err != nil { return "", errors.Wrap(err, "DOCKER CREATE") @@ -193,8 +200,8 @@ func createContainer(ctx context.Context, action *pb.WorkflowAction, cmd []strin return resp.ID, nil } -func runContainer(ctx context.Context, id string) error { - log.Debugln("run Container with ID : ", id) +func startContainer(ctx context.Context, l log.Logger, id string) error { + l.With("containerID", id).Debug("starting container") err := cli.ContainerStart(ctx, id, types.ContainerStartOptions{}) if err != nil { return errors.Wrap(err, "DOCKER START") @@ -202,63 +209,54 @@ func runContainer(ctx context.Context, id string) error { return nil } -func waitContainer(ctx context.Context, id string) (pb.ActionState, error, error) { +func waitContainer(ctx context.Context, id string) (pb.ActionState, error) { // Inspect whether the container is in running state - inspect, err := cli.ContainerInspect(ctx, id) + _, err := cli.ContainerInspect(ctx, id) if err != nil { - log.Debugln("Container does not exists") - return pb.ActionState_ACTION_FAILED, nil, nil - } - if inspect.ContainerJSONBase.State.Running { - log.Debugln("Container with id : ", id, " is in running state") - //return pb.ActionState_ACTION_FAILED, nil, nil + return pb.ActionState_ACTION_FAILED, nil } + // send API call to wait for the container completion - log.Debugln("Starting Container wait for id : ", id) wait, errC := cli.ContainerWait(ctx, id, container.WaitConditionNotRunning) select { case status := <-wait: - log.Infoln("Container with id ", id, "finished with status code : ", status.StatusCode) if status.StatusCode == 0 { - return pb.ActionState_ACTION_SUCCESS, nil, nil + return pb.ActionState_ACTION_SUCCESS, nil } - return pb.ActionState_ACTION_FAILED, nil, nil + return pb.ActionState_ACTION_FAILED, nil case err := <-errC: - 
log.Errorln("Container wait failed for id : ", id, " Error : ", err) - return pb.ActionState_ACTION_FAILED, nil, err + return pb.ActionState_ACTION_FAILED, err case <-ctx.Done(): - log.Errorln("Container wait for id : ", id, " is timedout Error : ", err) - return pb.ActionState_ACTION_TIMEOUT, ctx.Err(), nil + return pb.ActionState_ACTION_TIMEOUT, ctx.Err() } } func waitFailedContainer(ctx context.Context, id string, failedActionStatus chan pb.ActionState) { // send API call to wait for the container completion - log.Debugln("Starting Container wait for id : ", id) wait, errC := cli.ContainerWait(ctx, id, container.WaitConditionNotRunning) select { case status := <-wait: - log.Infoln("Container with id ", id, "finished with status code : ", status.StatusCode) if status.StatusCode == 0 { failedActionStatus <- pb.ActionState_ACTION_SUCCESS } failedActionStatus <- pb.ActionState_ACTION_FAILED case err := <-errC: - log.Errorln("Container wait failed for id : ", id, " Error : ", err) + logger.Error(err) failedActionStatus <- pb.ActionState_ACTION_FAILED } } -func removeContainer(ctx context.Context, id string) error { +func removeContainer(ctx context.Context, l log.Logger, id string) error { // create options for removing container opts := types.ContainerRemoveOptions{ Force: true, RemoveLinks: false, RemoveVolumes: true, } - log.Debugln("Start removing container ", id) + l.With("containerID", id).Info("removing container") + // send API call to remove the container err := cli.ContainerRemove(ctx, id, opts) if err != nil { @@ -276,35 +274,5 @@ func initializeDockerClient() (*client.Client, error) { if err != nil { return nil, errors.Wrap(err, "DOCKER CLIENT") } - - logger.SetFormatter(&logrus.JSONFormatter{}) return c, nil } - -func initializeLogger() { - level := os.Getenv("WORKER_LOG_LEVEL") - if level != "" { - switch strings.ToLower(level) { - case "panic": - logger.SetLevel(logrus.PanicLevel) - case "fatal": - logger.SetLevel(logrus.FatalLevel) - case "error": - 
logger.SetLevel(logrus.ErrorLevel) - case "warn", "warning": - logger.SetLevel(logrus.WarnLevel) - case "info": - logger.SetLevel(logrus.InfoLevel) - case "debug": - logger.SetLevel(logrus.DebugLevel) - case "trace": - logger.SetLevel(logrus.TraceLevel) - default: - logger.SetLevel(logrus.InfoLevel) - logger.Warningln("Invalid value for WORKER_LOG_LEVEL", level, " .Setting it to default(Info)") - } - } else { - logger.SetLevel(logrus.InfoLevel) - logger.Warningln("Variable WORKER_LOG_LEVEL is not set. Default is Info") - } -} diff --git a/cmd/tink-worker/main.go b/cmd/tink-worker/main.go index 8783926fc..3259e2537 100644 --- a/cmd/tink-worker/main.go +++ b/cmd/tink-worker/main.go @@ -5,7 +5,8 @@ import ( "strconv" "time" - "github.com/sirupsen/logrus" + "github.com/packethost/pkg/log" + "github.com/pkg/errors" "github.com/tinkerbell/tink/client" pb "github.com/tinkerbell/tink/protos/workflow" "google.golang.org/grpc" @@ -14,36 +15,46 @@ import ( const ( retryIntervalDefault = 3 retryCountDefault = 3 + + serviceKey = "github.com/tinkerbell/tink" + invalidRetryInterval = "invalid RETRY_INTERVAL, using default (seconds)" + invalidMaxRetry = "invalid MAX_RETRY, using default" + + errWorker = "worker finished with error" ) var ( rClient pb.WorkflowSvcClient retryInterval time.Duration retries int -) + logger log.Logger -var ( // version is set at build time version = "devel" - - logger = logrus.New() ) func main() { - initializeLogger() - logger.Debug("Starting version " + version) + log, err := log.Init(serviceKey) + if err != nil { + panic(err) + } + logger = log + defer logger.Close() + log.With("version", version).Info("starting") setupRetry() if setupErr := client.Setup(); setupErr != nil { - logger.Fatalln(setupErr) + log.Error(setupErr) + os.Exit(1) } conn, err := tryClientConnection() if err != nil { - logger.Fatalln(err) + log.Error(err) + os.Exit(1) } rClient = pb.NewWorkflowSvcClient(conn) err = processWorkflowActions(rClient) if err != nil 
{ - logger.Errorln("worker Finished with error", err) + log.Error(errors.Wrap(err, errWorker)) } } @@ -53,8 +64,7 @@ func tryClientConnection() (*grpc.ClientConn, error) { c, e := client.GetConnection() if e != nil { err = e - logger.Errorln(err) - logger.Errorf("retrying after %v seconds", retryInterval) + logger.With("error", err, "duration", retryInterval).Info("failed to connect, sleeping before retrying") <-time.After(retryInterval * time.Second) continue } @@ -66,12 +76,12 @@ func tryClientConnection() (*grpc.ClientConn, error) { func setupRetry() { interval := os.Getenv("RETRY_INTERVAL") if interval == "" { - logger.Infof("RETRY_INTERVAL not set. Using default, %d seconds\n", retryIntervalDefault) + logger.With("default", retryIntervalDefault).Info("RETRY_INTERVAL not set") retryInterval = retryIntervalDefault } else { interval, err := time.ParseDuration(interval) if err != nil { - logger.Warningf("Invalid RETRY_INTERVAL set. Using default, %d seconds.\n", retryIntervalDefault) + logger.With("default", retryIntervalDefault).Info(invalidRetryInterval) retryInterval = retryIntervalDefault } else { retryInterval = interval @@ -80,12 +90,12 @@ func setupRetry() { maxRetry := os.Getenv("MAX_RETRY") if maxRetry == "" { - logger.Infof("MAX_RETRY not set. Using default, %d retries.\n", retryCountDefault) + logger.With("default", retryCountDefault).Info("MAX_RETRY not set") retries = retryCountDefault } else { max, err := strconv.Atoi(maxRetry) if err != nil { - logger.Warningf("Invalid MAX_RETRY set. 
Using default, %d retries.\n", retryCountDefault) + logger.With("default", retryCountDefault).Info(invalidMaxRetry) retries = retryCountDefault } else { retries = max diff --git a/cmd/tink-worker/worker.go b/cmd/tink-worker/worker.go index dfa8f5e46..c4c88d0fb 100644 --- a/cmd/tink-worker/worker.go +++ b/cmd/tink-worker/worker.go @@ -12,7 +12,8 @@ import ( "strings" "time" - "github.com/sirupsen/logrus" + "github.com/packethost/pkg/log" + "github.com/pkg/errors" pb "github.com/tinkerbell/tink/protos/workflow" "google.golang.org/grpc/status" ) @@ -22,6 +23,12 @@ const ( dataDir = "/worker" maxFileSize = "MAX_FILE_SIZE" // in bytes defaultMaxFileSize int64 = 10485760 //10MB ~= 10485760Bytes + + errGetWfContext = "failed to get workflow context" + errGetWfActions = "failed to get actions for workflow" + errReportActionStatus = "failed to report action status" + + msgTurn = "it's turn for a different worker: %s" ) var ( @@ -31,19 +38,19 @@ var ( // WorkflowMetadata is the metadata related to workflow data type WorkflowMetadata struct { - WorkerID string `json:"worker-id"` - Action string `json:"action-name"` - Task string `json:"task-name"` - UpdatedAt time.Time `json:"updated-at"` + WorkerID string `json:"workerID"` + Action string `json:"actionName"` + Task string `json:"taskName"` + UpdatedAt time.Time `json:"updatedAt"` SHA string `json:"sha256"` } func processWorkflowActions(client pb.WorkflowSvcClient) error { workerID := os.Getenv("WORKER_ID") if workerID == "" { - return fmt.Errorf("required WORKER_ID") + return errors.New("required WORKER_ID") } - log = logger.WithField("worker_id", workerID) + ctx := context.Background() var err error cli, err = initializeDockerClient() @@ -51,15 +58,17 @@ func processWorkflowActions(client pb.WorkflowSvcClient) error { return err } for { + l := logger.With("workerID", workerID) res, err := client.GetWorkflowContexts(ctx, &pb.WorkflowContextRequest{WorkerId: workerID}) if err != nil { - fmt.Println("failed to get 
context") + return errors.Wrap(err, errGetWfContext) } for wfContext, err := res.Recv(); err == nil && wfContext != nil; wfContext, err = res.Recv() { wfID := wfContext.GetWorkflowId() + l = l.With("workflowID", wfID) actions, err := client.GetWorkflowActions(ctx, &pb.WorkflowActionsRequest{WorkflowId: wfID}) if err != nil { - return fmt.Errorf("can't find actions for workflow %s", wfID) + return errors.Wrap(err, errGetWfActions) } turn := false @@ -74,22 +83,27 @@ func processWorkflowActions(client pb.WorkflowSvcClient) error { switch wfContext.GetCurrentActionState() { case pb.ActionState_ACTION_SUCCESS: if isLastAction(wfContext, actions) { - log.Infof("Workflow %s completed successfully\n", wfID) continue } nextAction = actions.GetActionList()[wfContext.GetCurrentActionIndex()+1] actionIndex = int(wfContext.GetCurrentActionIndex()) + 1 case pb.ActionState_ACTION_FAILED: - log.Infof("Workflow %s Failed\n", wfID) continue case pb.ActionState_ACTION_TIMEOUT: - log.Infof("Workflow %s Timeout\n", wfID) continue default: - log.Infof("Current context %s\n", wfContext) nextAction = actions.GetActionList()[wfContext.GetCurrentActionIndex()] actionIndex = int(wfContext.GetCurrentActionIndex()) } + l := l.With( + "currentWorker", wfContext.GetCurrentWorker(), + "currentTask", wfContext.GetCurrentTask(), + "currentAction", wfContext.GetCurrentAction(), + "currentActionIndex", strconv.FormatInt(wfContext.GetCurrentActionIndex(), 10), + "currentActionState", wfContext.GetCurrentActionState(), + "totalNumberOfActions", wfContext.GetTotalNumberOfActions(), + ) + l.Info("current context") if nextAction.GetWorkerId() == workerID { turn = true } @@ -97,28 +111,37 @@ func processWorkflowActions(client pb.WorkflowSvcClient) error { if turn { wfDir := dataDir + string(os.PathSeparator) + wfID + l := l.With("actionName", actions.GetActionList()[actionIndex].GetName(), + "taskName", actions.GetActionList()[actionIndex].GetTaskName(), + ) if _, err := os.Stat(wfDir); os.IsNotExist(err) 
{ err := os.Mkdir(wfDir, os.FileMode(0755)) if err != nil { - log.Fatal(err) + l.Error(err) + os.Exit(1) } - f := openDataFile(wfDir) + f := openDataFile(wfDir, l) _, err = f.Write([]byte("{}")) if err != nil { - log.Fatal(err) + l.Error(err) + os.Exit(1) } f.Close() if err != nil { - log.Fatal(err) + l.Error(err) + os.Exit(1) } } - log.Printf("Starting with action %s\n", actions.GetActionList()[actionIndex]) + l.Info("starting with action") } for turn { action := actions.GetActionList()[actionIndex] + l := l.With("actionName", action.GetName(), + "taskName", action.GetTaskName(), + ) if wfContext.GetCurrentActionState() != pb.ActionState_ACTION_IN_PROGRESS { actionStatus := &pb.WorkflowActionStatus{ WorkflowId: wfID, @@ -129,20 +152,20 @@ func processWorkflowActions(client pb.WorkflowSvcClient) error { Message: "Started execution", WorkerId: action.GetWorkerId(), } + err := reportActionStatus(ctx, client, actionStatus) if err != nil { - exitWithGrpcError(err) + exitWithGrpcError(err, l) } - log.WithField("action_name", actionStatus.ActionName).Infoln("Sent action status ", actionStatus.ActionStatus) - log.Debugf("Sent action status %s\n", actionStatus) + l.With("duration", strconv.FormatInt(actionStatus.Seconds, 10)).Info("sent action status") } // get workflow data - getWorkflowData(ctx, client, wfID) + getWorkflowData(ctx, client, workerID, wfID) // start executing the action start := time.Now() - _, status, err := executeAction(ctx, actions.GetActionList()[actionIndex], wfID) + status, err := executeAction(ctx, actions.GetActionList()[actionIndex], wfID) elapsed := time.Since(start) actionStatus := &pb.WorkflowActionStatus{ @@ -155,43 +178,41 @@ func processWorkflowActions(client pb.WorkflowSvcClient) error { if err != nil || status != pb.ActionState_ACTION_SUCCESS { if status == pb.ActionState_ACTION_TIMEOUT { - log.WithFields(logrus.Fields{"action": action.GetName(), "Task": action.GetTaskName()}).Errorln("Action timed out") actionStatus.ActionStatus = 
pb.ActionState_ACTION_TIMEOUT - actionStatus.Message = "Action Timed out" } else { - log.WithFields(logrus.Fields{"action": action.GetName(), "Task": action.GetTaskName()}).Errorln("Action Failed") actionStatus.ActionStatus = pb.ActionState_ACTION_FAILED - actionStatus.Message = "Action Failed" } + l = l.With("actionStatus", actionStatus.ActionStatus.String()) + l.Error(err) rerr := reportActionStatus(ctx, client, actionStatus) if rerr != nil { - exitWithGrpcError(rerr) + exitWithGrpcError(rerr, l) } delete(workflowcontexts, wfID) return err } actionStatus.ActionStatus = pb.ActionState_ACTION_SUCCESS - actionStatus.Message = "Finished Execution Successfully" + actionStatus.Message = "finished execution successfully" err = reportActionStatus(ctx, client, actionStatus) if err != nil { - exitWithGrpcError(err) + exitWithGrpcError(err, l) } - log.Infof("Sent action status %s\n", actionStatus) + l.Info("sent action status") // send workflow data, if updated updateWorkflowData(ctx, client, actionStatus) if len(actions.GetActionList()) == actionIndex+1 { - log.Infoln("Reached to end of workflow") + l.Info("reached to end of workflow") delete(workflowcontexts, wfID) turn = false break } nextAction := actions.GetActionList()[actionIndex+1] if nextAction.GetWorkerId() != workerID { - log.Debugf("Different worker has turn %s\n", nextAction.GetWorkerId()) + l.Debug(fmt.Sprintf(msgTurn, nextAction.GetWorkerId())) turn = false } else { actionIndex = actionIndex + 1 @@ -203,10 +224,10 @@ func processWorkflowActions(client pb.WorkflowSvcClient) error { } } -func exitWithGrpcError(err error) { +func exitWithGrpcError(err error, l log.Logger) { if err != nil { errStatus, _ := status.FromError(err) - log.WithField("Error code : ", errStatus.Code()).Errorln(errStatus.Message()) + l.With("errorCode", errStatus.Code()).Error(err) os.Exit(1) } } @@ -216,12 +237,17 @@ func isLastAction(wfContext *pb.WorkflowContext, actions *pb.WorkflowActionList) } func reportActionStatus(ctx 
context.Context, client pb.WorkflowSvcClient, actionStatus *pb.WorkflowActionStatus) error { + l := logger.With("workflowID", actionStatus.GetWorkflowId(), + "workerID", actionStatus.GetWorkerId(), + "actionName", actionStatus.GetActionName(), + "taskName", actionStatus.GetTaskName(), + ) var err error for r := 1; r <= retries; r++ { _, err = client.ReportActionStatus(ctx, actionStatus) if err != nil { - log.Errorln("Report action status to server failed as : ", err) - log.Errorf("Retrying after %v seconds", retryInterval) + l.Error(errors.Wrap(err, errReportActionStatus)) + l.With("duration", retryInterval).Info("sleeping before retrying report action status") <-time.After(retryInterval * time.Second) continue } @@ -230,21 +256,23 @@ func reportActionStatus(ctx context.Context, client pb.WorkflowSvcClient, action return err } -func getWorkflowData(ctx context.Context, client pb.WorkflowSvcClient, workflowID string) { +func getWorkflowData(ctx context.Context, client pb.WorkflowSvcClient, workerID, workflowID string) { + l := logger.With("workflowID", workflowID, + "workerID", workerID, + ) res, err := client.GetWorkflowData(ctx, &pb.GetWorkflowDataRequest{WorkflowID: workflowID}) if err != nil { - log.Fatal(err) + l.Error(err) } if len(res.GetData()) != 0 { - log.Debugf("Data received: %x", res.GetData()) wfDir := dataDir + string(os.PathSeparator) + workflowID - f := openDataFile(wfDir) + f := openDataFile(wfDir, l) defer f.Close() _, err := f.Write(res.GetData()) if err != nil { - log.Fatal(err) + l.Error(err) } h := sha.New() workflowDataSHA[workflowID] = base64.StdEncoding.EncodeToString(h.Sum(res.Data)) @@ -252,16 +280,21 @@ func getWorkflowData(ctx context.Context, client pb.WorkflowSvcClient, workflowI } func updateWorkflowData(ctx context.Context, client pb.WorkflowSvcClient, actionStatus *pb.WorkflowActionStatus) { + l := logger.With("workflowID", actionStatus.GetWorkflowId(), + "workerID", actionStatus.GetWorkerId(), + "actionName", actionStatus.GetActionName(), + 
"taskName", actionStatus.GetTaskName(), + ) wfDir := dataDir + string(os.PathSeparator) + actionStatus.GetWorkflowId() - f := openDataFile(wfDir) + f := openDataFile(wfDir, l) defer f.Close() data, err := ioutil.ReadAll(f) if err != nil { - log.Fatal(err) + l.Error(err) } - if isValidDataFile(f, data) { + if isValidDataFile(f, data, l) { h := sha.New() if _, ok := workflowDataSHA[actionStatus.GetWorkflowId()]; !ok { checksum := base64.StdEncoding.EncodeToString(h.Sum(data)) @@ -277,6 +310,11 @@ func updateWorkflowData(ctx context.Context, client pb.WorkflowSvcClient, action } func sendUpdate(ctx context.Context, client pb.WorkflowSvcClient, st *pb.WorkflowActionStatus, data []byte, checksum string) { + l := logger.With("workflowID", st.GetWorkflowId(), + "workerID", st.GetWorkerId(), + "actionName", st.GetActionName(), + "taskName", st.GetTaskName(), + ) meta := WorkflowMetadata{ WorkerID: st.GetWorkerId(), Action: st.GetActionName(), @@ -286,39 +324,41 @@ func sendUpdate(ctx context.Context, client pb.WorkflowSvcClient, st *pb.Workflo } metadata, err := json.Marshal(meta) if err != nil { - log.Fatal(err) + l.Error(err) + os.Exit(1) } - log.Debugf("Sending updated data: %v\n", string(data)) _, err = client.UpdateWorkflowData(ctx, &pb.UpdateWorkflowDataRequest{ WorkflowID: st.GetWorkflowId(), Data: data, Metadata: metadata, }) if err != nil { - log.Fatal(err) + l.Error(err) + os.Exit(1) } } -func openDataFile(wfDir string) *os.File { +func openDataFile(wfDir string, l log.Logger) *os.File { f, err := os.OpenFile(wfDir+string(os.PathSeparator)+dataFile, os.O_RDWR|os.O_CREATE, 0644) if err != nil { - log.Fatal(err) + l.Error(err) + os.Exit(1) } return f } -func isValidDataFile(f *os.File, data []byte) bool { +func isValidDataFile(f *os.File, data []byte, l log.Logger) bool { var dataMap map[string]interface{} err := json.Unmarshal(data, &dataMap) if err != nil { - log.Print(err) + l.Error(err) return false } stat, err := f.Stat() if err != nil { - log.Print(err) + 
logger.Error(err) return false } @@ -326,7 +366,7 @@ func isValidDataFile(f *os.File, data []byte) bool { if val != "" { maxSize, err := strconv.ParseInt(val, 10, 64) if err == nil { - log.Print(err) + logger.Error(err) } return stat.Size() <= maxSize } diff --git a/db/mock/mock.go b/db/mock/mock.go index 8773a5dfb..789efe88a 100644 --- a/db/mock/mock.go +++ b/db/mock/mock.go @@ -4,11 +4,15 @@ import ( "context" "time" + "github.com/google/uuid" + "github.com/tinkerbell/tink/db" pb "github.com/tinkerbell/tink/protos/workflow" ) // DB is the mocked implementation of Database interface type DB struct { + // workflow + CreateWorkflowFunc func(ctx context.Context, wf db.Workflow, data string, id uuid.UUID) error GetfromWfDataTableFunc func(ctx context.Context, req *pb.GetWorkflowDataRequest) ([]byte, error) InsertIntoWfDataTableFunc func(ctx context.Context, req *pb.UpdateWorkflowDataRequest) error GetWorkflowMetadataFunc func(ctx context.Context, req *pb.GetWorkflowDataRequest) ([]byte, error) @@ -18,4 +22,7 @@ type DB struct { GetWorkflowActionsFunc func(ctx context.Context, wfID string) (*pb.WorkflowActionList, error) UpdateWorkflowStateFunc func(ctx context.Context, wfContext *pb.WorkflowContext) error InsertIntoWorkflowEventTableFunc func(ctx context.Context, wfEvent *pb.WorkflowActionStatus, time time.Time) error + + // template + GetTemplateFunc func(ctx context.Context, id string) (string, string, error) } diff --git a/db/mock/template.go b/db/mock/template.go index 5031660a8..6e083c718 100644 --- a/db/mock/template.go +++ b/db/mock/template.go @@ -37,7 +37,7 @@ func (d DB) CreateTemplate(ctx context.Context, name string, data string, id uui // GetTemplate returns a workflow template func (d DB) GetTemplate(ctx context.Context, id string) (string, string, error) { - return "", "", nil + return d.GetTemplateFunc(ctx, id) } // DeleteTemplate deletes a workflow template diff --git a/db/mock/workflow.go b/db/mock/workflow.go index c3504692e..b49a875ce 
100644 --- a/db/mock/workflow.go +++ b/db/mock/workflow.go @@ -11,7 +11,7 @@ import ( // CreateWorkflow creates a new workflow func (d DB) CreateWorkflow(ctx context.Context, wf db.Workflow, data string, id uuid.UUID) error { - return nil + return d.CreateWorkflowFunc(ctx, wf, data, id) } // InsertIntoWfDataTable : Insert ephemeral data in workflow_data table diff --git a/db/template.go b/db/template.go index 0f765558d..9e2f07acf 100644 --- a/db/template.go +++ b/db/template.go @@ -56,13 +56,11 @@ func (d TinkDB) GetTemplate(ctx context.Context, id string) (string, string, err if err == nil { return string(name), string(data), nil } - if err != sql.ErrNoRows { err = errors.Wrap(err, "SELECT") logger.Error(err) } - - return "", "", nil + return "", "", err } // DeleteTemplate deletes a workflow template diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml index 26407d408..c8c9dfa9e 100644 --- a/deploy/docker-compose.yml +++ b/deploy/docker-compose.yml @@ -5,10 +5,6 @@ services: restart: unless-stopped environment: FACILITY: ${FACILITY:-onprem} - PACKET_ENV: ${PACKET_ENV:-testing} - PACKET_VERSION: ${PACKET_VERSION:-ignored} - ROLLBAR_TOKEN: ${ROLLBAR_TOKEN:-ignored} - ROLLBAR_DISABLE: ${ROLLBAR_DISABLE:-1} PGDATABASE: tinkerbell PGHOST: db PGPASSWORD: tinkerbell @@ -97,10 +93,6 @@ services: API_AUTH_TOKEN: ${PACKET_API_AUTH_TOKEN:-ignored} API_CONSUMER_TOKEN: ${PACKET_CONSUMER_TOKEN:-ignored} FACILITY_CODE: ${FACILITY:-onprem} - PACKET_ENV: ${PACKET_ENV:-testing} - PACKET_VERSION: ${PACKET_VERSION:-ignored} - ROLLBAR_TOKEN: ${ROLLBAR_TOKEN:-ignored} - ROLLBAR_DISABLE: ${ROLLBAR_DISABLE:-1} MIRROR_HOST: ${TINKERBELL_NGINX_IP:-127.0.0.1} DNS_SERVERS: 8.8.8.8 PUBLIC_IP: $TINKERBELL_HOST_IP @@ -137,10 +129,6 @@ services: restart: unless-stopped network_mode: host environment: - ROLLBAR_TOKEN: ${ROLLBAR_TOKEN-ignored} - ROLLBAR_DISABLE: 1 - PACKET_ENV: testing - PACKET_VERSION: ${PACKET_VERSION:-ignored} GRPC_PORT: 42115 HEGEL_FACILITY: ${FACILITY:-onprem} 
HEGEL_USE_TLS: 0 diff --git a/deploy/terraform/hardware_data.tpl b/deploy/terraform/hardware_data.tpl deleted file mode 100644 index ce13c1db0..000000000 --- a/deploy/terraform/hardware_data.tpl +++ /dev/null @@ -1,32 +0,0 @@ -{ - "id": "${id}", - "metadata": { - "facility": { - "facility_code": "${facility_code}", - "plan_slug": "${plan_slug}", - "plan_version_slug": "" - }, - "instance": {}, - "state": "" - }, - "network": { - "interfaces": [ - { - "dhcp": { - "arch": "x86_64", - "ip": { - "address": "${address}", - "gateway": "192.168.1.1", - "netmask": "255.255.255.248" - }, - "mac": "${mac}", - "uefi": false - }, - "netboot": { - "allow_pxe": true, - "allow_workflow": true - } - } - ] - } -} diff --git a/deploy/terraform/install_package.sh b/deploy/terraform/install_package.sh deleted file mode 100644 index 95e86fdd5..000000000 --- a/deploy/terraform/install_package.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash - -YUM="yum" -APT="apt" -PIP3="pip3" -YUM_CONFIG_MGR="yum-config-manager" -WHICH_YUM=$(command -v $YUM) -WHICH_APT=$(command -v $APT) -YUM_INSTALL="$YUM install" -APT_INSTALL="$APT install" -PIP3_INSTALL="$PIP3 install" -declare -a YUM_LIST=("https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm" - "docker-ce" - "docker-ce-cli" - "epel-release" - "pass" - "python3") -declare -a APT_LIST=("docker" - "docker-compose" "pass") - -add_yum_repo() ( - $YUM_CONFIG_MGR --add-repo https://download.docker.com/linux/centos/docker-ce.repo -) - -update_yum() ( - $YUM_INSTALL -y yum-utils - add_yum_repo -) - -update_apt() ( - $APT update - DEBIAN_FRONTEND=noninteractive $APT --yes --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade -) - -restart_docker_service() ( - service docker restart -) -install_yum_packages() ( - $YUM_INSTALL "${YUM_LIST[@]}" -y -) - -install_pip3_packages() ( - $PIP3_INSTALL docker-compose -) - -install_apt_packages() ( - $APT_INSTALL 
"${APT_LIST[@]}" -y -) - -main() ( - if [[ -n $WHICH_YUM ]]; then - update_yum - install_yum_packages - install_pip3_packages - restart_docker_service - elif [[ -n $WHICH_APT ]]; then - update_apt - install_apt_packages - restart_docker_service - else - echo "Unknown platform. Error while installing the required packages" - exit 1 - fi -) - -main diff --git a/deploy/terraform/main.tf b/deploy/terraform/main.tf deleted file mode 100644 index 9cd4f12f7..000000000 --- a/deploy/terraform/main.tf +++ /dev/null @@ -1,141 +0,0 @@ -# Configure the Packet Provider. -terraform { - required_providers { - packet = { - source = "packethost/packet" - version = "~> 3.0.1" - } - null = { - source = "hashicorp/null" - } - } -} - -provider "packet" { - auth_token = var.packet_api_token -} - -# Create a new VLAN in datacenter "ewr1" -resource "packet_vlan" "provisioning_vlan" { - description = "provisioning_vlan" - facility = var.facility - project_id = var.project_id -} - -# Create a device and add it to tf_project_1 -resource "packet_device" "tink_provisioner" { - hostname = "tink-provisioner" - plan = var.device_type - facilities = [var.facility] - operating_system = "ubuntu_18_04" - billing_cycle = "hourly" - project_id = var.project_id - user_data = file("install_package.sh") -} - -resource "null_resource" "tink_directory" { - connection { - type = "ssh" - user = var.ssh_user - host = packet_device.tink_provisioner.network[0].address - } - - provisioner "remote-exec" { - inline = [ - "mkdir -p /root/tink/deploy" - ] - } - - provisioner "file" { - source = "../../setup.sh" - destination = "/root/tink/setup.sh" - } - - provisioner "file" { - source = "../../generate-envrc.sh" - destination = "/root/tink/generate-envrc.sh" - } - - provisioner "file" { - source = "../../deploy" - destination = "/root/tink" - } - - provisioner "remote-exec" { - inline = [ - "chmod +x /root/tink/*.sh /root/tink/deploy/tls/*.sh" - ] - } -} - -resource "packet_device_network_type" 
"tink_provisioner_network_type" { - device_id = packet_device.tink_provisioner.id - type = "hybrid" -} - -# Create a device and add it to tf_project_1 -resource "packet_device" "tink_worker" { - count = var.worker_count - - hostname = "tink-worker-${count.index}" - plan = var.device_type - facilities = [var.facility] - operating_system = "custom_ipxe" - ipxe_script_url = "https://boot.netboot.xyz" - always_pxe = "true" - billing_cycle = "hourly" - project_id = var.project_id -} - -resource "packet_device_network_type" "tink_worker_network_type" { - count = var.worker_count - - device_id = packet_device.tink_worker[count.index].id - type = "layer2-individual" -} - -# Attach VLAN to provisioner -resource "packet_port_vlan_attachment" "provisioner" { - depends_on = [packet_device_network_type.tink_provisioner_network_type] - device_id = packet_device.tink_provisioner.id - port_name = "eth1" - vlan_vnid = packet_vlan.provisioning_vlan.vxlan -} - -# Attach VLAN to worker -resource "packet_port_vlan_attachment" "worker" { - count = var.worker_count - depends_on = [packet_device_network_type.tink_worker_network_type] - - device_id = packet_device.tink_worker[count.index].id - port_name = "eth0" - vlan_vnid = packet_vlan.provisioning_vlan.vxlan -} - -data "template_file" "worker_hardware_data" { - count = var.worker_count - template = file("${path.module}/hardware_data.tpl") - vars = { - id = packet_device.tink_worker[count.index].id - facility_code = packet_device.tink_worker[count.index].deployed_facility - plan_slug = packet_device.tink_worker[count.index].plan - address = "192.168.1.${count.index + 5}" - mac = packet_device.tink_worker[count.index].ports[1].mac - } -} - -resource "null_resource" "hardware_data" { - count = var.worker_count - depends_on = [null_resource.tink_directory] - - connection { - type = "ssh" - user = var.ssh_user - host = packet_device.tink_provisioner.network[0].address - } - - provisioner "file" { - content = 
data.template_file.worker_hardware_data[count.index].rendered - destination = "/root/tink/deploy/hardware-data-${count.index}.json" - } -} diff --git a/deploy/terraform/outputs.tf b/deploy/terraform/outputs.tf deleted file mode 100644 index 936b2ed3b..000000000 --- a/deploy/terraform/outputs.tf +++ /dev/null @@ -1,15 +0,0 @@ -output "provisioner_dns_name" { - value = "${split("-", packet_device.tink_provisioner.id)[0]}.packethost.net" -} - -output "provisioner_ip" { - value = packet_device.tink_provisioner.network[0].address -} - -output "worker_mac_addr" { - value = formatlist("%s", packet_device.tink_worker[*].ports[1].mac) -} - -output "worker_sos" { - value = formatlist("%s@sos.%s.packet.net", packet_device.tink_worker[*].id, packet_device.tink_worker[*].deployed_facility) -} diff --git a/deploy/terraform/variables.tf b/deploy/terraform/variables.tf deleted file mode 100644 index 7b369ed8d..000000000 --- a/deploy/terraform/variables.tf +++ /dev/null @@ -1,32 +0,0 @@ -variable "packet_api_token" { - description = "Packet user api token" - type = string -} - -variable "project_id" { - description = "Project ID" - type = string -} - -variable "worker_count" { - description = "Number of Workers" - type = number - default = 1 -} -variable "facility" { - description = "Packet facility to provision in" - type = string - default = "sjc1" -} - -variable "device_type" { - type = string - description = "Type of device to provision" - default = "c3.small.x86" -} - -variable "ssh_user" { - description = "Username that will be used to transfer file from your local environment to the provisioner" - type = string - default = "root" -} diff --git a/deploy/terraform/versions.tf b/deploy/terraform/versions.tf deleted file mode 100644 index 6b6318def..000000000 --- a/deploy/terraform/versions.tf +++ /dev/null @@ -1,3 +0,0 @@ -terraform { - required_version = ">= 0.13" -} diff --git a/generate-envrc.sh b/generate-envrc.sh index 6140a1d24..f033a1c6e 100755 --- a/generate-envrc.sh 
+++ b/generate-envrc.sh @@ -80,8 +80,6 @@ export TINKERBELL_REGISTRY_PASSWORD="$registry_password" # Legacy options, to be deleted: export FACILITY=onprem -export ROLLBAR_TOKEN=ignored -export ROLLBAR_DISABLE=1 EOF ) diff --git a/go.mod b/go.mod index c8b7881ce..4a6c2693b 100644 --- a/go.mod +++ b/go.mod @@ -24,18 +24,16 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/packethost/pkg v0.0.0-20190410153520-e8e15f4ce770 + github.com/packethost/pkg v0.0.0-20200903155310-0433e0605550 github.com/pkg/errors v0.8.1 github.com/prometheus/client_golang v1.1.0 - github.com/rollbar/rollbar-go v1.0.2 // indirect - github.com/sirupsen/logrus v1.4.1 + github.com/sirupsen/logrus v1.4.2 github.com/spf13/cobra v1.0.0 github.com/spf13/viper v1.4.0 github.com/stretchr/testify v1.3.0 go.mongodb.org/mongo-driver v1.1.2 // indirect golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect golang.org/x/sys v0.0.0-20200331124033-c3d80250170d // indirect - golang.org/x/text v0.3.2 // indirect golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 google.golang.org/grpc v1.29.1 diff --git a/go.sum b/go.sum index 0a68b54ae..54a774fa7 100644 --- a/go.sum +++ b/go.sum @@ -123,6 +123,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences 
v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= @@ -153,8 +155,8 @@ github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2i github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/packethost/pkg v0.0.0-20190410153520-e8e15f4ce770 h1:M/ErkHz96yXYNdcu0G7kX5Qa1HUliL5QczNdA9/0k40= -github.com/packethost/pkg v0.0.0-20190410153520-e8e15f4ce770/go.mod h1:cbOkI4WbX7B68Nj552pbadMrjVy2oMIVazIEo7z+qWQ= +github.com/packethost/pkg v0.0.0-20200903155310-0433e0605550 h1:/ojL7LAVjyH1MY+db0+j6rcWU3UWWpzHksYFsHWs9vQ= +github.com/packethost/pkg v0.0.0-20200903155310-0433e0605550/go.mod h1:GSv7cTtIjns4yc0pyajaM1RE/KE4djJONoblFIRDrxA= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= @@ -168,6 +170,7 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= 
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= @@ -195,6 +198,8 @@ github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= @@ -211,6 +216,7 @@ github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= @@ -248,6 +254,7 @@ golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -262,8 +269,10 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d h1:nc5K6ox/4lTFbMVSL9WRR81ixkcwXThoiF6yf+R9scA= @@ -287,6 +296,7 @@ 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190708153700-3bdd9d9f5532/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig= @@ -295,6 +305,7 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaR google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= diff --git a/grpc-server/hardware.go b/grpc-server/hardware.go index 9a4db0a65..704a6f7ba 100644 --- a/grpc-server/hardware.go +++ b/grpc-server/hardware.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "strings" "time" "github.com/pkg/errors" @@ -29,8 +30,16 @@ func (s *server) Push(ctx context.Context, in *hardware.PushRequest) (*hardware. 
// must be a copy so deferred cacheInFlight.Dec matches the Inc labels = prometheus.Labels{"method": "Push", "op": ""} - hw := in.Data - if hw.Id == "" { + hw := in.GetData() + if hw == nil { + err := errors.New("expected data not to be nil") + logger.Error(err) + return &hardware.Empty{}, err + } + + // we know hw is non-nil at this point, since we returned early above + // if it was nil + if hw.GetId() == "" { metrics.CacheTotals.With(labels).Inc() metrics.CacheErrors.With(labels).Inc() err := errors.New("id must be set to a UUID, got id: " + hw.Id) @@ -38,6 +47,9 @@ func (s *server) Push(ctx context.Context, in *hardware.PushRequest) (*hardware. return &hardware.Empty{}, err } + // normalize data prior to storing in the database + normalizeHardwareData(hw) + // validate the hardware data to avoid duplicate mac address err := s.validateHardwareData(ctx, hw) if err != nil { @@ -292,22 +304,35 @@ func (s *server) Delete(ctx context.Context, in *hardware.DeleteRequest) (*hardw } func (s *server) validateHardwareData(ctx context.Context, hw *hardware.Hardware) error { - interfaces := hw.GetNetwork().GetInterfaces() - for i := range hw.GetNetwork().GetInterfaces() { - data, _ := s.db.GetByMAC(ctx, interfaces[i].GetDhcp().GetMac()) - if data != "" { - logger.With("MAC", interfaces[i].GetDhcp().GetMac()).Info(duplicateMAC) + for _, iface := range hw.GetNetwork().GetInterfaces() { + mac := iface.GetDhcp().GetMac() + + if data, _ := s.db.GetByMAC(ctx, mac); data != "" { + logger.With("MAC", mac).Info(duplicateMAC) + newhw := hardware.Hardware{} - err := json.Unmarshal([]byte(data), &newhw) - if err != nil { + if err := json.Unmarshal([]byte(data), &newhw); err != nil { logger.Error(err, "Failed to unmarshal hardware data") return err } + if newhw.Id == hw.Id { return nil } - return errors.New(fmt.Sprintf(conflictMACAddr, interfaces[i].GetDhcp().GetMac())) + + return fmt.Errorf(conflictMACAddr, mac) } } + return nil } + +func normalizeHardwareData(hw *hardware.Hardware) { 
+ // Ensure MAC is stored as lowercase + for _, iface := range hw.GetNetwork().GetInterfaces() { + dhcp := iface.GetDhcp() + if mac := dhcp.GetMac(); mac != "" { + dhcp.Mac = strings.ToLower(mac) + } + } +} diff --git a/grpc-server/hardware_test.go b/grpc-server/hardware_test.go new file mode 100644 index 000000000..78d1e0217 --- /dev/null +++ b/grpc-server/hardware_test.go @@ -0,0 +1,179 @@ +package grpcserver + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tinkerbell/tink/protos/hardware" +) + +func Test_server_normalizeHardwareData(t *testing.T) { + tests := []struct { + name string + given *hardware.Hardware + expected *hardware.Hardware + }{ + { + name: "Expect MAC to be normalized to lowercase", + given: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + Interfaces: []*hardware.Hardware_Network_Interface{ + { + Dhcp: &hardware.Hardware_DHCP{ + Mac: "02:42:0E:D9:C7:53", + }, + }, + }, + }, + }, + expected: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + Interfaces: []*hardware.Hardware_Network_Interface{ + { + Dhcp: &hardware.Hardware_DHCP{ + Mac: "02:42:0e:d9:c7:53", + }, + }, + }, + }, + }, + }, + { + name: "Expect MAC to be normalized to lowercase, multiple interfaces", + given: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + Interfaces: []*hardware.Hardware_Network_Interface{ + { + Dhcp: &hardware.Hardware_DHCP{ + Mac: "02:42:0E:D9:C7:53", + }, + }, + { + Dhcp: &hardware.Hardware_DHCP{ + Mac: "02:4F:0E:D9:C7:5E", + }, + }, + }, + }, + }, + expected: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + Interfaces: []*hardware.Hardware_Network_Interface{ + { + Dhcp: &hardware.Hardware_DHCP{ + Mac: "02:42:0e:d9:c7:53", + }, + }, + { + Dhcp: &hardware.Hardware_DHCP{ + Mac: "02:4f:0e:d9:c7:5e", + }, + }, + }, + }, + }, + }, + { + name: "Expect MAC to be normalized to lowercase, nultiple interfaces, mixed casing", + given: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + 
Interfaces: []*hardware.Hardware_Network_Interface{ + { + Dhcp: &hardware.Hardware_DHCP{ + Mac: "02:42:0e:d9:c7:53", + }, + }, + { + Dhcp: &hardware.Hardware_DHCP{ + Mac: "02:4F:0E:D9:C7:5E", + }, + }, + }, + }, + }, + expected: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + Interfaces: []*hardware.Hardware_Network_Interface{ + { + Dhcp: &hardware.Hardware_DHCP{ + Mac: "02:42:0e:d9:c7:53", + }, + }, + { + Dhcp: &hardware.Hardware_DHCP{ + Mac: "02:4f:0e:d9:c7:5e", + }, + }, + }, + }, + }, + }, + { + name: "Handle nil DHCP", + given: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + Interfaces: []*hardware.Hardware_Network_Interface{ + { + Dhcp: nil, + }, + }, + }, + }, + expected: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + Interfaces: []*hardware.Hardware_Network_Interface{ + { + Dhcp: nil, + }, + }, + }, + }, + }, + { + name: "Handle nil Interface", + given: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + Interfaces: []*hardware.Hardware_Network_Interface{ + nil, + }, + }, + }, + expected: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + Interfaces: []*hardware.Hardware_Network_Interface{ + nil, + }, + }, + }, + }, + { + name: "Handle nil Interfaces", + given: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + Interfaces: nil, + }, + }, + expected: &hardware.Hardware{ + Network: &hardware.Hardware_Network{ + Interfaces: nil, + }, + }, + }, + { + name: "Handle nil Network", + given: &hardware.Hardware{Network: nil}, + expected: &hardware.Hardware{Network: nil}, + }, + { + name: "Handle nil Hardware", + given: nil, + expected: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.NotPanics(t, func() { normalizeHardwareData(tt.given) }) + assert.Equal(t, tt.expected, tt.given) + }) + } +} diff --git a/grpc-server/tinkerbell_test.go b/grpc-server/tinkerbell_test.go index 2bb5d5114..65dff32e2 100644 --- a/grpc-server/tinkerbell_test.go +++ 
b/grpc-server/tinkerbell_test.go @@ -34,11 +34,8 @@ func testServer(db db.Database) *server { } func TestMain(m *testing.M) { - os.Setenv("PACKET_ENV", "test") - os.Setenv("PACKET_VERSION", "ignored") - os.Setenv("ROLLBAR_TOKEN", "ignored") - l, _, _ := log.Init("github.com/tinkerbell/tink") + l, _ := log.Init("github.com/tinkerbell/tink") logger = l.Package("grpcserver") metrics.SetupMetrics("onprem", logger) os.Exit(m.Run()) diff --git a/grpc-server/workflow.go b/grpc-server/workflow.go index c34766d42..4bfe65ac2 100644 --- a/grpc-server/workflow.go +++ b/grpc-server/workflow.go @@ -24,6 +24,11 @@ var state = map[int32]workflow.State{ 4: workflow.State_SUCCESS, } +const ( + errFailedToGetTemplate = "failed to get template with ID: %s" + errTemplateParsing = "failed to parse template with ID: %s" +) + // CreateWorkflow implements workflow.CreateWorkflow func (s *server) CreateWorkflow(ctx context.Context, in *workflow.CreateRequest) (*workflow.CreateResponse, error) { logger.Info("createworkflow") @@ -47,7 +52,6 @@ func (s *server) CreateWorkflow(ctx context.Context, in *workflow.CreateRequest) data, err := createYaml(ctx, s.db, in.Template, in.Hardware) if err != nil { metrics.CacheErrors.With(labels).Inc() - err = errors.Wrap(err, "failed to create Yaml") logger.Error(err) return &workflow.CreateResponse{}, err } @@ -269,22 +273,24 @@ func (s *server) ShowWorkflowEvents(req *workflow.GetRequest, stream workflow.Wo func createYaml(ctx context.Context, db db.Database, temp string, devices string) (string, error) { _, tempData, err := db.GetTemplate(ctx, temp) if err != nil { - return "", err + return "", errors.Wrapf(err, errFailedToGetTemplate, temp) } - return renderTemplate(string(tempData), []byte(devices)) + return renderTemplate(temp, tempData, []byte(devices)) } -func renderTemplate(tempData string, devices []byte) (string, error) { +func renderTemplate(templateID, tempData string, devices []byte) (string, error) { var hardware map[string]interface{} 
err := json.Unmarshal(devices, &hardware) if err != nil { + err = errors.Wrapf(err, errTemplateParsing, templateID) logger.Error(err) - return "", nil + return "", err } t := template.New("workflow-template") _, err = t.Parse(string(tempData)) if err != nil { + err = errors.Wrapf(err, errTemplateParsing, templateID) logger.Error(err) return "", nil } @@ -292,7 +298,9 @@ func renderTemplate(tempData string, devices []byte) (string, error) { buf := new(bytes.Buffer) err = t.Execute(buf, hardware) if err != nil { - return "", nil + err = errors.Wrapf(err, errTemplateParsing, templateID) + logger.Error(err) + return "", err } return buf.String(), nil } diff --git a/grpc-server/workflow_test.go b/grpc-server/workflow_test.go new file mode 100644 index 000000000..aa1420a83 --- /dev/null +++ b/grpc-server/workflow_test.go @@ -0,0 +1,112 @@ +package grpcserver + +import ( + "context" + "testing" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/tinkerbell/tink/db" + "github.com/tinkerbell/tink/db/mock" + "github.com/tinkerbell/tink/protos/workflow" +) + +const ( + templateID = "e29b6444-1de7-4a69-bf25-6ea4ae869005" + hw = `{"device_1": "08:00:27:00:00:01"}` + templateData = `version: "0.1" +name: hello_world_workflow +global_timeout: 600 +tasks: + - name: "hello world" + worker: "{{.device_1}}" + actions: + - name: "hello_world" + image: hello-world + timeout: 60` +) + +func TestCreateWorkflow(t *testing.T) { + type ( + args struct { + db mock.DB + wfTemplate, wfHardware string + } + want struct { + expectedError bool + } + ) + testCases := map[string]struct { + args args + want want + }{ + "FailedToGetTempalte": { + args: args{ + db: mock.DB{ + GetTemplateFunc: func(ctx context.Context, id string) (string, string, error) { + return "", "", errors.New("failed to get template") + }, + }, + wfTemplate: templateID, + wfHardware: hw, + }, + want: want{ + expectedError: true, + }, + }, + 
"FailedCreatingWorkflow": { + args: args{ + db: mock.DB{ + GetTemplateFunc: func(ctx context.Context, id string) (string, string, error) { + return "", templateData, nil + }, + CreateWorkflowFunc: func(ctx context.Context, wf db.Workflow, data string, id uuid.UUID) error { + return errors.New("failed to create a workfow") + }, + }, + wfTemplate: templateID, + wfHardware: hw, + }, + want: want{ + expectedError: true, + }, + }, + "SuccessCreatingWorkflow": { + args: args{ + db: mock.DB{ + GetTemplateFunc: func(ctx context.Context, id string) (string, string, error) { + return "", templateData, nil + }, + CreateWorkflowFunc: func(ctx context.Context, wf db.Workflow, data string, id uuid.UUID) error { + return nil + }, + }, + wfTemplate: templateID, + wfHardware: hw, + }, + want: want{ + expectedError: false, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + s := testServer(tc.args.db) + res, err := s.CreateWorkflow(context.TODO(), &workflow.CreateRequest{ + Hardware: tc.args.wfHardware, + Template: tc.args.wfTemplate, + }) + if err != nil { + assert.Error(t, err) + assert.Empty(t, res) + assert.True(t, tc.want.expectedError) + return + } + assert.NoError(t, err) + assert.NotEmpty(t, res) + assert.False(t, tc.want.expectedError) + }) + } +} diff --git a/http-server/http_handlers_test.go b/http-server/http_handlers_test.go index 091c1654e..a3c0225dd 100644 --- a/http-server/http_handlers_test.go +++ b/http-server/http_handlers_test.go @@ -53,12 +53,7 @@ func (s *server) Push(ctx context.Context, in *hardware.PushRequest) (*hardware. 
} func TestMain(m *testing.M) { - os.Setenv("PACKET_ENV", "test") - os.Setenv("PACKET_VERSION", "ignored") - os.Setenv("ROLLBAR_TOKEN", "ignored") - - logger, _, _ = log.Init("github.com/tinkerbell/tink") - + logger, _ = log.Init("github.com/tinkerbell/tink") lis = bufconn.Listen(bufSize) s := grpc.NewServer() hardware.RegisterHardwareServiceServer(s, &server{}) @@ -67,7 +62,6 @@ func TestMain(m *testing.M) { logger.Info("Server exited with error: %v", err) } }() - os.Exit(m.Run()) }