From ee510cdb9c71699ced7ec78587f7dc04ddd782d3 Mon Sep 17 00:00:00 2001
From: Tedi Mitiku
Date: Wed, 31 Jul 2024 09:00:36 -0400
Subject: [PATCH 01/24] add time measurements in cli, engine, log file parsing

---
 cli/cli/commands/service/logs/logs.go              | 8 ++++++++
 cli/cli/scripts/build.sh                           | 9 +++++----
 .../per_week_stream_logs_strategy.go               | 7 +++++++
 .../engine/server/engine_connect_server_service.go | 8 ++++++++
 engine/server/go.mod                               | 2 +-
 engine/server/go.sum                               | 4 ++--
 6 files changed, 31 insertions(+), 7 deletions(-)

diff --git a/cli/cli/commands/service/logs/logs.go b/cli/cli/commands/service/logs/logs.go
index 550cb5df0b..1c7e52a0cf 100644
--- a/cli/cli/commands/service/logs/logs.go
+++ b/cli/cli/commands/service/logs/logs.go
@@ -27,6 +27,7 @@ import (
     "os"
     "os/signal"
     "strconv"
+    "time"
 )
 
 const (
@@ -262,13 +263,16 @@ func run(
     interruptChan := make(chan os.Signal, interruptChanBufferSize)
     signal.Notify(interruptChan, os.Interrupt)
 
+    var totalLogPrintDuration time.Duration
     for {
         select {
         case serviceLogsStreamContent, isChanOpen := <-serviceLogsStreamContentChan:
             if !isChanOpen {
+                logrus.Infof("CLI [logs.go] TOTAL TIME TO PRINT LOGS: %v", totalLogPrintDuration)
                 return nil
             }
 
+            startTime := time.Now()
             notFoundServiceUuids := serviceLogsStreamContent.GetNotFoundServiceUuids()
 
             for notFoundServiceUuid := range notFoundServiceUuids {
@@ -287,10 +291,14 @@ func run(
                     out.PrintOutLn(fmt.Sprintf("[%v] %v", colorPrinter(serviceIdentifier), serviceLog.GetContent()))
                 }
             }
+            endTime := time.Now()
+            totalLogPrintDuration += endTime.Sub(startTime)
         case <-interruptChan:
             logrus.Debugf("Received signal interruption in service logs Kurtosis CLI command")
+            logrus.Infof("CLI [logs.go] TOTAL TIME TO PRINT LOGS: %v", totalLogPrintDuration)
             return nil
         }
+        logrus.Infof("CLI [logs.go] TOTAL TIME TO PRINT LOGS: %v", totalLogPrintDuration)
     }
 }
 
diff --git a/cli/cli/scripts/build.sh b/cli/cli/scripts/build.sh
index 0f8401c209..db8f5d7443 100755
--- a/cli/cli/scripts/build.sh
+++ b/cli/cli/scripts/build.sh
@@ -97,10 +97,11 @@
         fi
         exit 1
     fi
    # Executing goreleaser v1.26.2 without needing to install it
-    if ! curl -sfL https://goreleaser.com/static/run | VERSION=v1.26.2 DISTRIBUTION=oss bash -s -- ${goreleaser_verb_and_flags}; then
-        echo "Error: Couldn't build the CLI binary for the current OS/arch" >&2
-        exit 1
-    fi
+#    if ! curl -sfL https://goreleaser.com/static/run | VERSION=v1.26.2 DISTRIBUTION=oss bash -s -- ${goreleaser_verb_and_flags}; then
+    if ! GORELEASER_CURRENT_TAG=$(cat $root_dirpath/version.txt) goreleaser ${goreleaser_verb_and_flags}; then
+        echo "Error: Couldn't build the CLI binary for the current OS/arch" >&2
+        exit 1
+    fi
 )
 
 # Final verification
diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
index 755df67123..f1117e80d9 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
@@ -183,14 +183,17 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs(
     logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
     serviceUuid service.ServiceUUID,
     conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) error {
+    var totalLogFileReadDuration time.Duration
     for {
         select {
         case <-ctx.Done():
             logrus.Debugf("Context was canceled, stopping streaming service logs for service '%v'", serviceUuid)
+            logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO READ FILES: %v", totalLogFileReadDuration)
             return nil
         default:
             jsonLogStr, err := getCompleteJsonLogString(logsReader)
             if isValidJsonEnding(jsonLogStr) {
+                startTime := time.Now()
                 jsonLog, err := convertStringToJson(jsonLogStr)
                 if err != nil {
                     return stacktrace.Propagate(err, "An error occurred converting the json log string '%v' into json.", jsonLogStr)
@@ -198,13 +201,17 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs(
                 if err = strategy.sendJsonLogLine(jsonLog, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil {
                     return err
                 }
+                endTime := time.Now()
+                totalLogFileReadDuration += endTime.Sub(startTime)
             }
 
             if err != nil {
                 // if we've reached end of logs, return success, otherwise return the error
                 if errors.Is(err, io.EOF) {
+                    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO READ FILES: %v", totalLogFileReadDuration)
                     return nil
                 } else {
+                    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO READ FILES: %v", totalLogFileReadDuration)
                     return err
                 }
             }
diff --git a/engine/server/engine/server/engine_connect_server_service.go b/engine/server/engine/server/engine_connect_server_service.go
index 3fc908e916..1921e0725d 100644
--- a/engine/server/engine/server/engine_connect_server_service.go
+++ b/engine/server/engine/server/engine_connect_server_service.go
@@ -347,6 +347,7 @@ func (service *EngineConnectServerService) GetServiceLogs(ctx context.Context, c
         }
     }()
 
+    var totalLogStreamDuration time.Duration
     for {
         select {
         //stream case
@@ -354,24 +355,31 @@ func (service *EngineConnectServerService) GetServiceLogs(ctx context.Context, c
             //If the channel is closed means that the logs database client won't continue sending streams
             if !isChanOpen {
                 logrus.Debug("Exiting the stream loop after receiving a close signal from the service logs by service UUID channel")
+                logrus.Infof("ENGINE [engine_connect_server_service.go] TOTAL TIME TO STREAM LOGS IN ENGINE: %v", totalLogStreamDuration)
                 return nil
             }
+            startTime := time.Now()
             getServiceLogsResponse := newLogsResponse(requestedServiceUuids, serviceLogsByServiceUuid, notFoundServiceUuids)
             if err := stream.Send(getServiceLogsResponse); err != nil {
                 return stacktrace.Propagate(err, "An error occurred sending the stream logs for service logs response '%+v'", getServiceLogsResponse)
             }
+            endTime := time.Now()
+            totalLogStreamDuration += endTime.Sub(startTime)
         //client cancel ctx case
         case <-contextWithCancel.Done():
             logrus.Debug("The user service logs stream has done")
+            logrus.Infof("ENGINE [engine_connect_server_service.go] TOTAL TIME TO STREAM LOGS IN ENGINE: %v", totalLogStreamDuration)
             return nil
         //error from logs database case
         case err, isChanOpen := <-errChan:
             if isChanOpen {
                 logrus.Debug("Exiting the stream because an error from the logs database client was received through the error chan.")
+                logrus.Infof("ENGINE [engine_connect_server_service.go] TOTAL TIME TO STREAM LOGS IN ENGINE: %v", totalLogStreamDuration)
                 return stacktrace.Propagate(err, "An error occurred streaming user service logs.")
             }
             logrus.Debug("Exiting the stream loop after receiving a close signal from the error chan")
+            logrus.Infof("ENGINE [engine_connect_server_service.go] TOTAL TIME TO STREAM LOGS IN ENGINE: %v", totalLogStreamDuration)
             return nil
         }
     }
diff --git a/engine/server/go.mod b/engine/server/go.mod
index f6548f984e..4660cedcc6 100644
--- a/engine/server/go.mod
+++ b/engine/server/go.mod
@@ -63,7 +63,7 @@ require (
     github.com/kurtosis-tech/kurtosis/grpc-file-transfer/golang v0.0.0-20230803130419-099ee7a4e3dc
     github.com/kurtosis-tech/kurtosis/metrics-library/golang v0.0.0-20231206095907-9bdf0d02cb90
     github.com/labstack/echo/v4 v4.11.3
-    github.com/rs/cors v1.9.0
+    github.com/rs/cors v1.11.0
     github.com/spf13/afero v1.10.0
     golang.org/x/exp v0.0.0-20230905200255-921286631fa9
     k8s.io/apimachinery v0.27.2
diff --git a/engine/server/go.sum b/engine/server/go.sum
index b7fd1e32d1..c027f4c074 100644
--- a/engine/server/go.sum
+++ b/engine/server/go.sum
@@ -312,8 +312,8 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rs/cors v1.9.0 h1:l9HGsTsHJcvW14Nk7J9KFz8bzeAWXn3CG6bgt7LsrAE=
-github.com/rs/cors v1.9.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
+github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po=
+github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
 github.com/segmentio/backo-go v1.0.0 h1:kbOAtGJY2DqOR0jfRkYEorx/b18RgtepGtY3+Cpe6qA=
 github.com/segmentio/backo-go v1.0.0/go.mod h1:kJ9mm9YmoWSkk+oQ+5Cj8DEoRCX2JT6As4kEtIIOp1M=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=

From f8811264790fd070e0af8a3372d8962b83cd4462 Mon Sep 17 00:00:00 2001
From: Tedi Mitiku
Date: Wed, 31 Jul 2024 10:39:56 -0400
Subject: [PATCH 02/24] add more granular measurements

---
 cli/cli/commands/service/logs/logs.go       |  1 -
 .../per_week_stream_logs_strategy.go        | 92 ++++++++++++++++++-
 .../server/engine_connect_server_service.go | 11 ++-
 3 files changed, 97 insertions(+), 7 deletions(-)

diff --git a/cli/cli/commands/service/logs/logs.go b/cli/cli/commands/service/logs/logs.go
index 1c7e52a0cf..028039224c 100644
--- a/cli/cli/commands/service/logs/logs.go
+++ b/cli/cli/commands/service/logs/logs.go
@@ -298,7 +298,6 @@ func run(
             logrus.Infof("CLI [logs.go] TOTAL TIME TO PRINT LOGS: %v", totalLogPrintDuration)
             return nil
         }
-        logrus.Infof("CLI [logs.go] TOTAL TIME TO PRINT LOGS: %v", totalLogPrintDuration)
     }
 }
 
diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
index f1117e80d9..ba7ac5e388 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
@@ -89,10 +89,12 @@ func (strategy *PerWeekStreamLogsStrategy) StreamLogs(
     }()
 
     if shouldReturnAllLogs {
+        startTime := time.Now()
         if err := strategy.streamAllLogs(ctx, logsReader, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil {
             streamErrChan <- stacktrace.Propagate(err, "An error occurred streaming all logs for service '%v' in enclave '%v'", serviceUuid, enclaveUuid)
             return
         }
+        logrus.Infof("TOTAL TIME IN STREAM ALL LOGS FUNCTION: %v", time.Now().Sub(startTime))
     } else {
         if err := strategy.streamTailLogs(ctx, logsReader, numLogLines, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil {
             streamErrChan <- stacktrace.Propagate(err, "An error occurred streaming '%v' logs for service '%v' in enclave '%v'", numLogLines, serviceUuid, enclaveUuid)
@@ -184,23 +186,38 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs(
     serviceUuid service.ServiceUUID,
     conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) error {
     var totalLogFileReadDuration time.Duration
+    var totalTimeToGetJsonStrings time.Duration
+    var totalTimeToSendJsonLogs time.Duration
+    var totalTimeProcessLinesInSend time.Duration
+    var totalTimeSendingAcrossChannelInSend time.Duration
     for {
         select {
         case <-ctx.Done():
             logrus.Debugf("Context was canceled, stopping streaming service logs for service '%v'", serviceUuid)
-            logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO READ FILES: %v", totalLogFileReadDuration)
+            logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, totalTimeSendingAcrossChannelInSend, totalTimeProcessLinesInSend)
             return nil
         default:
+            startTime := time.Now()
+
+            getJsonStartTime := time.Now()
             jsonLogStr, err := getCompleteJsonLogString(logsReader)
+            totalTimeToGetJsonStrings += time.Now().Sub(getJsonStartTime)
+
             if isValidJsonEnding(jsonLogStr) {
-                startTime := time.Now()
                 jsonLog, err := convertStringToJson(jsonLogStr)
                 if err != nil {
                     return stacktrace.Propagate(err, "An error occurred converting the json log string '%v' into json.", jsonLogStr)
                 }
-                if err = strategy.sendJsonLogLine(jsonLog, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil {
+
+                sendJsonLogLineStartTime := time.Now()
+                err, sendDuration, processDuration := strategy.sendJsonLogLineWithTimes(jsonLog, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex)
+                if err != nil {
                     return err
                 }
+                totalTimeToSendJsonLogs += time.Now().Sub(sendJsonLogLineStartTime)
+                totalTimeSendingAcrossChannelInSend += sendDuration
+                totalTimeProcessLinesInSend += processDuration
+
                 endTime := time.Now()
                 totalLogFileReadDuration += endTime.Sub(startTime)
             }
@@ -208,10 +225,10 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs(
             if err != nil {
                 // if we've reached end of logs, return success, otherwise return the error
                 if errors.Is(err, io.EOF) {
-                    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO READ FILES: %v", totalLogFileReadDuration)
+                    logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, totalTimeSendingAcrossChannelInSend, totalTimeProcessLinesInSend)
                     return nil
                 } else {
-                    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO READ FILES: %v", totalLogFileReadDuration)
+                    logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, totalTimeSendingAcrossChannelInSend, totalTimeProcessLinesInSend)
                     return err
                 }
             }
@@ -354,6 +371,63 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLine(
     return nil
 }
 
+func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLineWithTimes(
+    jsonLog JsonLog,
+    logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
+    serviceUuid service.ServiceUUID,
+    conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) (error, time.Duration, time.Duration) {
+    // each logLineStr is of the following structure: {"enclave_uuid": "...", "service_uuid":"...", "log": "...",.. "timestamp":"..."}
+    // eg. {"container_type":"api-container", "container_id":"8f8558ba", "container_name":"/kurtosis-api--ffd",
+    // "log":"hi","timestamp":"2023-08-14T14:57:49Z"}
+
+    var processDuration time.Duration
+    var sendDuration time.Duration
+
+    processStart := time.Now()
+    // Then extract the actual log message using the vectors log field
+    logMsgStr, found := jsonLog[volume_consts.LogLabel]
+    if !found {
+        return stacktrace.NewError("An error retrieving the log field '%v' from json log: %v\n", volume_consts.LogLabel, jsonLog), sendDuration, processDuration
+    }
+
+    // Extract the timestamp using vectors timestamp field
+    logTimestamp, err := parseTimestampFromJsonLogLine(jsonLog)
+    if err != nil {
+        return stacktrace.Propagate(err, "An error occurred parsing timestamp from json log line."), sendDuration, processDuration
+    }
+    logLine := logline.NewLogLine(logMsgStr, *logTimestamp)
+
+    // Then filter by checking if the log message is valid based on requested filters
+    validLogLine, err := logLine.IsValidLogLineBaseOnFilters(conjunctiveLogLinesFiltersWithRegex)
+    if err != nil {
+        return stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex), sendDuration, processDuration
+    }
+    if !validLogLine {
+        return nil, sendDuration, processDuration
+    }
+
+    // ensure this log line is within the retention period if it has a timestamp
+    withinRetentionPeriod, err := strategy.isWithinRetentionPeriod(logLine)
+    if err != nil {
+        return stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex), sendDuration, processDuration
+    }
+    if !withinRetentionPeriod {
+        return nil, sendDuration, processDuration
+    }
+
+    // send the log line
+    logLines := []logline.LogLine{*logLine}
+    userServicesLogLinesMap := map[service.ServiceUUID][]logline.LogLine{
+        serviceUuid: logLines,
+    }
+    processDuration = time.Now().Sub(processStart)
+
+    sendStart := time.Now()
+    logsByKurtosisUserServiceUuidChan <- userServicesLogLinesMap
+    sendDuration = time.Now().Sub(sendStart)
+    return nil, sendDuration, processDuration
+}
+
 // Returns true if [logLine] has no timestamp
 func (strategy *PerWeekStreamLogsStrategy) isWithinRetentionPeriod(logLine *logline.LogLine) (bool, error) {
     retentionPeriod := strategy.time.Now().Add(time.Duration(-strategy.logRetentionPeriodInWeeks) * oneWeek)
@@ -434,3 +508,11 @@ func parseTimestampFromJsonLogLine(logLine JsonLog) (*time.Time, error) {
     }
     return &timestamp, nil
 }
+
+func logTimes(totalDuration, getLineDuration, totalSendLineDuration, actualSendLineDuration, processLineDuration time.Duration) {
+    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO READ FILES: %v", totalDuration)
+    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO GET JSON LINES: %v", getLineDuration)
+    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO SEND JSON LINES: %v", totalSendLineDuration)
+    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO SEND JSON LINES ACROSS CHANNEL: %v", actualSendLineDuration)
+    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO PROCESS JSON LINES BEFORE SENDING: %v", processLineDuration)
+}
diff --git a/engine/server/engine/server/engine_connect_server_service.go b/engine/server/engine/server/engine_connect_server_service.go
index 1921e0725d..a3f3c84225 100644
--- a/engine/server/engine/server/engine_connect_server_service.go
+++ b/engine/server/engine/server/engine_connect_server_service.go
@@ -348,6 +348,7 @@ func (service *EngineConnectServerService) GetServiceLogs(ctx context.Context, c
     }()
 
     var totalLogStreamDuration time.Duration
+    var counter int
     for {
         select {
         //stream case
@@ -358,12 +359,20 @@ func (service *EngineConnectServerService) GetServiceLogs(ctx context.Context, c
                 logrus.Infof("ENGINE [engine_connect_server_service.go] TOTAL TIME TO STREAM LOGS IN ENGINE: %v", totalLogStreamDuration)
                 return nil
             }
-            startTime := time.Now()
 
+            // print out num log lines every 100 sends
+            //for serviceUUID, logs := range serviceLogsByServiceUuid {
+            //    if counter%100 == 0 {
+            //        logrus.Infof("NUM LOG LINES FOR SERVICE '%v' CHECK IN ENGINE CONNECT SERVICE: %v", serviceUUID, len(logs))
+            //    }
+            //}
+
+            startTime := time.Now()
             getServiceLogsResponse := newLogsResponse(requestedServiceUuids, serviceLogsByServiceUuid, notFoundServiceUuids)
             if err := stream.Send(getServiceLogsResponse); err != nil {
                 return stacktrace.Propagate(err, "An error occurred sending the stream logs for service logs response '%+v'", getServiceLogsResponse)
             }
+            counter += 1
             endTime := time.Now()
             totalLogStreamDuration += endTime.Sub(startTime)
         //client cancel ctx case

From 487e5d1714b0a4f3884ce184d11af3825623fe4f Mon Sep 17 00:00:00 2001
From: Tedi Mitiku
Date: Thu, 1 Aug 2024 01:26:41 -0400
Subject: [PATCH 03/24] use buffered log channel

---
 .../persistent_volume_logs_database_client.go |   5 +-
 .../per_week_stream_logs_strategy.go          | 126 +++++++++++++++---
 2 files changed, 108 insertions(+), 23 deletions(-)

diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go
index 46c00f00d2..c8ef39d57d 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go
@@ -13,7 +13,8 @@ import (
 )
 
 const (
-    oneSenderAdded = 1
+    logLineBufferSize = 100
+    oneSenderAdded    = 1
 )
 
 // persistentVolumeLogsDatabaseClient pulls logs from a Docker volume the engine is mounted to
@@ -63,7 +64,7 @@ func (client *persistentVolumeLogsDatabaseClient) StreamUserServiceLogs(
     streamErrChan := make(chan error)
 
     // this channel will return the user service log lines by service UUID
-    logsByKurtosisUserServiceUuidChan := make(chan map[service.ServiceUUID][]logline.LogLine)
+    logsByKurtosisUserServiceUuidChan := make(chan map[service.ServiceUUID][]logline.LogLine, logLineBufferSize) // MAKE IT A BUFFERED CHANNEL SEE HOW THAT IMPROVES THINGS
 
     wgSenders := &sync.WaitGroup{}
     for serviceUuid := range userServiceUuids {
diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
index ba7ac5e388..c0d339dd2f 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
@@ -185,16 +185,29 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs(
     logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
     serviceUuid service.ServiceUUID,
     conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) error {
+
     var totalLogFileReadDuration time.Duration
     var totalTimeToGetJsonStrings time.Duration
     var totalTimeToSendJsonLogs time.Duration
+
+    var totalTimeToSendLogsGranular time.Duration
     var totalTimeProcessLinesInSend time.Duration
-    var totalTimeSendingAcrossChannelInSend time.Duration
+    var totalTimestampParsing time.Duration
+    var totalFilterCheck time.Duration
+    var totalRetentionCheck time.Duration
+
+    var ltm SendLogLineTimeMeasurements
     for {
         select {
         case <-ctx.Done():
             logrus.Debugf("Context was canceled, stopping streaming service logs for service '%v'", serviceUuid)
-            logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, totalTimeSendingAcrossChannelInSend, totalTimeProcessLinesInSend)
+            logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, SendLogLineTimeMeasurements{
+                processDuration:              totalTimeProcessLinesInSend,
+                sendDuration:                 totalTimeToSendLogsGranular,
+                parseTimestampDuration:       totalTimestampParsing,
+                filterCheckDuration:          totalFilterCheck,
+                retentionPeriodCheckDuration: totalRetentionCheck,
+            })
             return nil
         default:
             startTime := time.Now()
 
             getJsonStartTime := time.Now()
             jsonLogStr, err := getCompleteJsonLogString(logsReader)
             totalTimeToGetJsonStrings += time.Now().Sub(getJsonStartTime)
 
             if isValidJsonEnding(jsonLogStr) {
                 jsonLog, err := convertStringToJson(jsonLogStr)
                 if err != nil {
                     return stacktrace.Propagate(err, "An error occurred converting the json log string '%v' into json.", jsonLogStr)
                 }
 
                 sendJsonLogLineStartTime := time.Now()
-                err, sendDuration, processDuration := strategy.sendJsonLogLineWithTimes(jsonLog, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex)
+                err, ltm = strategy.sendJsonLogLineWithTimes(jsonLog, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex)
                 if err != nil {
                     return err
                 }
                 totalTimeToSendJsonLogs += time.Now().Sub(sendJsonLogLineStartTime)
-                totalTimeSendingAcrossChannelInSend += sendDuration
-                totalTimeProcessLinesInSend += processDuration
+
+                totalTimeToSendLogsGranular += ltm.sendDuration
+                totalTimeProcessLinesInSend += ltm.processDuration
+                totalTimestampParsing += ltm.parseTimestampDuration
+                totalFilterCheck += ltm.filterCheckDuration
+                totalRetentionCheck += ltm.retentionPeriodCheckDuration
 
                 endTime := time.Now()
                 totalLogFileReadDuration += endTime.Sub(startTime)
             }
@@ -225,10 +242,22 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs(
             if err != nil {
                 // if we've reached end of logs, return success, otherwise return the error
                 if errors.Is(err, io.EOF) {
-                    logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, totalTimeSendingAcrossChannelInSend, totalTimeProcessLinesInSend)
+                    logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, SendLogLineTimeMeasurements{
+                        processDuration:              totalTimeProcessLinesInSend,
+                        sendDuration:                 totalTimeToSendLogsGranular,
+                        parseTimestampDuration:       totalTimestampParsing,
+                        filterCheckDuration:          totalFilterCheck,
+                        retentionPeriodCheckDuration: totalRetentionCheck,
+                    })
                     return nil
                 } else {
-                    logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, totalTimeSendingAcrossChannelInSend, totalTimeProcessLinesInSend)
+                    logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, SendLogLineTimeMeasurements{
+                        processDuration:              totalTimeProcessLinesInSend,
+                        sendDuration:                 totalTimeToSendLogsGranular,
+                        parseTimestampDuration:       totalTimestampParsing,
+                        filterCheckDuration:          totalFilterCheck,
+                        retentionPeriodCheckDuration: totalRetentionCheck,
+                    })
                     return err
                 }
             }
@@ -371,61 +400,113 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLine(
     return nil
 }
 
+type SendLogLineTimeMeasurements struct {
+    processDuration              time.Duration
+    sendDuration                 time.Duration
+    parseTimestampDuration       time.Duration
+    filterCheckDuration          time.Duration
+    retentionPeriodCheckDuration time.Duration
+}
+
 func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLineWithTimes(
     jsonLog JsonLog,
     logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
     serviceUuid service.ServiceUUID,
-    conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) (error, time.Duration, time.Duration) {
+    conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) (error, SendLogLineTimeMeasurements) {
     // each logLineStr is of the following structure: {"enclave_uuid": "...", "service_uuid":"...", "log": "...",.. "timestamp":"..."}
     // eg. {"container_type":"api-container", "container_id":"8f8558ba", "container_name":"/kurtosis-api--ffd",
     // "log":"hi","timestamp":"2023-08-14T14:57:49Z"}
-
     var processDuration time.Duration
     var sendDuration time.Duration
+    var parseTimestampDuration time.Duration
+    var filterCheckDuration time.Duration
+    var retentionPeriodCheckDuration time.Duration
 
     processStart := time.Now()
     // Then extract the actual log message using the vectors log field
     logMsgStr, found := jsonLog[volume_consts.LogLabel]
     if !found {
-        return stacktrace.NewError("An error retrieving the log field '%v' from json log: %v\n", volume_consts.LogLabel, jsonLog), sendDuration, processDuration
+        return stacktrace.NewError("An error retrieving the log field '%v' from json log: %v\n", volume_consts.LogLabel, jsonLog), SendLogLineTimeMeasurements{
+            processDuration:              processDuration,
+            sendDuration:                 sendDuration,
+            parseTimestampDuration:       parseTimestampDuration,
+            filterCheckDuration:          filterCheckDuration,
+            retentionPeriodCheckDuration: retentionPeriodCheckDuration,
+        }
     }
 
+    timestampStart := time.Now()
     // Extract the timestamp using vectors timestamp field
     logTimestamp, err := parseTimestampFromJsonLogLine(jsonLog)
     if err != nil {
-        return stacktrace.Propagate(err, "An error occurred parsing timestamp from json log line."), sendDuration, processDuration
+        return stacktrace.Propagate(err, "An error occurred parsing timestamp from json log line."), SendLogLineTimeMeasurements{
+            processDuration:              processDuration,
+            sendDuration:                 sendDuration,
+            parseTimestampDuration:       parseTimestampDuration,
+            filterCheckDuration:          filterCheckDuration,
+            retentionPeriodCheckDuration: retentionPeriodCheckDuration,
+        }
     }
     logLine := logline.NewLogLine(logMsgStr, *logTimestamp)
+    parseTimestampDuration += time.Now().Sub(timestampStart)
 
+    filterStart := time.Now()
     // Then filter by checking if the log message is valid based on requested filters
     validLogLine, err := logLine.IsValidLogLineBaseOnFilters(conjunctiveLogLinesFiltersWithRegex)
     if err != nil {
-        return stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex), sendDuration, processDuration
+        return stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex), SendLogLineTimeMeasurements{
+            processDuration:              processDuration,
+            sendDuration:                 sendDuration,
+            parseTimestampDuration:       parseTimestampDuration,
+            filterCheckDuration:          filterCheckDuration,
+            retentionPeriodCheckDuration: retentionPeriodCheckDuration,
+        }
     }
     if !validLogLine {
-        return nil, sendDuration, processDuration
+        return nil, SendLogLineTimeMeasurements{
+            processDuration:              processDuration,
+            sendDuration:                 sendDuration,
+            parseTimestampDuration:       parseTimestampDuration,
+            filterCheckDuration:          filterCheckDuration,
+            retentionPeriodCheckDuration: retentionPeriodCheckDuration,
+        }
     }
+    filterCheckDuration += time.Now().Sub(filterStart)
 
+    retentionCheckStart := time.Now()
     // ensure this log line is within the retention period if it has a timestamp
     withinRetentionPeriod, err := strategy.isWithinRetentionPeriod(logLine)
     if err != nil {
-        return stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex), sendDuration, processDuration
+        return stacktrace.Propagate(err, "An error occurred determining whether log line '%+v' is within the retention period.", logLine), SendLogLineTimeMeasurements{}
     }
     if !withinRetentionPeriod {
-        return nil, sendDuration, processDuration
+        return nil, SendLogLineTimeMeasurements{
+            processDuration:              processDuration,
+            sendDuration:                 sendDuration,
+            parseTimestampDuration:       parseTimestampDuration,
+            filterCheckDuration:          filterCheckDuration,
+            retentionPeriodCheckDuration: retentionPeriodCheckDuration,
+        }
     }
+    retentionPeriodCheckDuration += time.Now().Sub(retentionCheckStart)
 
     // send the log line
     logLines := []logline.LogLine{*logLine}
     userServicesLogLinesMap := map[service.ServiceUUID][]logline.LogLine{
         serviceUuid: logLines,
     }
-    processDuration = time.Now().Sub(processStart)
+    processDuration += time.Now().Sub(processStart)
 
     sendStart := time.Now()
     logsByKurtosisUserServiceUuidChan <- userServicesLogLinesMap
-    sendDuration = time.Now().Sub(sendStart)
-    return nil, sendDuration, processDuration
+    sendDuration += time.Now().Sub(sendStart)
+    return nil, SendLogLineTimeMeasurements{
+        processDuration:              processDuration,
+        sendDuration:                 sendDuration,
+        parseTimestampDuration:       parseTimestampDuration,
+        filterCheckDuration:          filterCheckDuration,
+        retentionPeriodCheckDuration: retentionPeriodCheckDuration,
+    }
 }
 
 // Returns true if [logLine] has no timestamp
@@ -509,10 +590,13 @@ func parseTimestampFromJsonLogLine(logLine JsonLog) (*time.Time, error) {
     }
     return &timestamp, nil
 }
 
-func logTimes(totalDuration, getLineDuration, totalSendLineDuration, actualSendLineDuration, processLineDuration time.Duration) {
+func logTimes(totalDuration, getLineDuration, totalSendLineDuration time.Duration, sendLogLineTM SendLogLineTimeMeasurements) {
     logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO READ FILES: %v", totalDuration)
     logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO GET JSON LINES: %v", getLineDuration)
     logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO SEND JSON LINES: %v", totalSendLineDuration)
-    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO SEND JSON LINES ACROSS CHANNEL: %v", actualSendLineDuration)
-    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO PROCESS JSON LINES BEFORE SENDING: %v", processLineDuration)
+    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO SEND JSON LINES ACROSS CHANNEL: %v", sendLogLineTM.sendDuration)
+    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO PROCESS JSON LINES BEFORE SENDING: %v", sendLogLineTM.processDuration)
+    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO PARSE TIMESTAMPS: %v", sendLogLineTM.parseTimestampDuration)
+    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO FILTER LINES BASED ON REGEXES: %v", sendLogLineTM.filterCheckDuration)
+    logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO CHECK RETENTION PERIOD: %v", sendLogLineTM.retentionPeriodCheckDuration)
 }

From f316abec01b89fc7e652346b34e4c9ff1e76c546 Mon Sep 17 00:00:00 2001
From: Tedi Mitiku
Date: Thu, 1 Aug 2024 02:47:10 -0400
Subject: [PATCH 04/24] batch send log lines

---
 .../persistent_volume_logs_database_client.go |    4 +-
 ...istent_volume_logs_database_client_test.go | 1554 ++++++++---------
 .../per_week_stream_logs_strategy.go          |   46 +-
 3 files changed, 803 insertions(+), 801 deletions(-)
diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go
index c8ef39d57d..1c44d1ef41 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go
@@ -13,7 +13,7 @@ import (
 )
 
 const (
-    logLineBufferSize = 100
+    logLineBufferSize = 300
     oneSenderAdded    = 1
 )
 
@@ -64,7 +64,7 @@ func (client *persistentVolumeLogsDatabaseClient) StreamUserServiceLogs(
     streamErrChan := make(chan error)
 
     // this channel will return the user service log lines by service UUID
-    logsByKurtosisUserServiceUuidChan := make(chan map[service.ServiceUUID][]logline.LogLine, logLineBufferSize) // MAKE IT A BUFFERED CHANNEL SEE HOW THAT IMPROVES THINGS
+    logsByKurtosisUserServiceUuidChan := make(chan map[service.ServiceUUID][]logline.LogLine, logLineBufferSize)
 
     wgSenders := &sync.WaitGroup{}
     for serviceUuid := range userServiceUuids {
diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go
index 82f14c00e9..801697aafa 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go
@@ -1,21 +1,7 @@
 package persistent_volume
 
 import (
-    "context"
-    "fmt"
-    "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface"
     "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface/objects/enclave"
-    "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface/objects/service"
-    "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock"
-    "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy"
-    "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts"
-    "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem"
-    "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/logline"
-    "github.com/kurtosis-tech/stacktrace"
-    "github.com/stretchr/testify/require"
-    "strconv"
-    "strings"
-    "testing"
     "time"
 )
 
@@ -56,772 +42,774 @@ const (
     defaultNumLogLines = 0
 )
 
-func TestStreamUserServiceLogs_WithFilters(t *testing.T) {
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: 2,
-        testUserService2Uuid: 2,
-        testUserService3Uuid: 2,
-    }
-
-    firstTextFilter := logline.NewDoesContainTextLogLineFilter(firstFilterText)
-    secondTextFilter := logline.NewDoesNotContainTextLogLineFilter(secondFilterText)
-    regexFilter := logline.NewDoesContainMatchRegexLogLineFilter(firstMatchRegexFilterStr)
-
-    logLinesFilters := []logline.LogLineFilter{
-        *firstTextFilter,
-        *secondTextFilter,
-        *regexFilter,
-    }
-
-    expectedFirstLogLine := "Starting feature 'runs idempotently'"
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-        testUserService2Uuid: true,
-        testUserService3Uuid: true,
-    }
-
-    underlyingFs := createFilledPerFileFilesystem()
-    perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy()
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        logLinesFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perFileStreamStrategy,
-    )
-    require.NoError(t, testEvaluationErr)
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-        require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
-    }
-}
-
-func TestStreamUserServiceLogsPerWeek_WithFilters(t *testing.T) {
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: 2,
-        testUserService2Uuid: 2,
-        testUserService3Uuid: 2,
-    }
-
-    firstTextFilter := logline.NewDoesContainTextLogLineFilter(firstFilterText)
-    secondTextFilter := logline.NewDoesNotContainTextLogLineFilter(secondFilterText)
-    regexFilter := logline.NewDoesContainMatchRegexLogLineFilter(firstMatchRegexFilterStr)
-
-    logLinesFilters := []logline.LogLineFilter{
-        *firstTextFilter,
-        *secondTextFilter,
-        *regexFilter,
-    }
-
-    expectedFirstLogLine := "Starting feature 'runs idempotently'"
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-        testUserService2Uuid: true,
-        testUserService3Uuid: true,
-    }
-
-    underlyingFs := createFilledPerWeekFilesystem(startingWeek)
-    mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay)
-    perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        logLinesFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perWeekStreamStrategy,
-    )
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-        require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
-    }
-
-    require.NoError(t, testEvaluationErr)
-}
-
-func TestStreamUserServiceLogs_NoLogsFromPersistentVolume(t *testing.T) {
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: 0,
-        testUserService2Uuid: 0,
-        testUserService3Uuid: 0,
-    }
-
-    firstTextFilter := logline.NewDoesContainTextLogLineFilter(notFoundedFilterText)
-
-    logLinesFilters := []logline.LogLineFilter{
-        *firstTextFilter,
-    }
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-        testUserService2Uuid: true,
-        testUserService3Uuid: true,
-    }
-
-    underlyingFs := createEmptyPerFileFilesystem()
-    perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy()
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        logLinesFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perFileStreamStrategy,
-    )
-    require.NoError(t, testEvaluationErr)
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-    }
-}
-
-func TestStreamUserServiceLogsPerWeek_NoLogsFromPersistentVolume(t *testing.T) {
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: 0,
-        testUserService2Uuid: 0,
-        testUserService3Uuid: 0,
-    }
-
-    firstTextFilter := logline.NewDoesContainTextLogLineFilter(notFoundedFilterText)
-
-    logLinesFilters := []logline.LogLineFilter{
-        *firstTextFilter,
-    }
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-        testUserService2Uuid: true,
-        testUserService3Uuid: true,
-    }
-
-    underlyingFs := createEmptyPerWeekFilesystem(startingWeek)
-    mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay)
-    perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        logLinesFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perWeekStreamStrategy,
-    )
-    require.NoError(t, testEvaluationErr)
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-    }
-}
-
-func TestStreamUserServiceLogs_ThousandsOfLogLinesSuccessfulExecution(t *testing.T) {
-    expectedAmountLogLines := 10_000
-
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: expectedAmountLogLines,
-    }
-
-    var emptyFilters []logline.LogLineFilter
-
-    expectedFirstLogLine := "Starting feature 'centralized logs'"
-
-    var logLines []string
-
-    for i := 0; i <= expectedAmountLogLines; i++ {
-        logLines = append(logLines, logLine1)
-    }
-
-    logLinesStr := strings.Join(logLines, "\n")
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-    }
-
-    underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
-
-    file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype)
-    file1, err := underlyingFs.Create(file1PathStr)
-    require.NoError(t, err)
-    _, err = file1.WriteString(logLinesStr)
-    require.NoError(t, err)
-
-    perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy()
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        emptyFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perFileStreamStrategy,
-    )
-    require.NoError(t, testEvaluationErr)
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-        require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
-    }
-}
-
-func TestStreamUserServiceLogsPerWeek_ThousandsOfLogLinesSuccessfulExecution(t *testing.T) {
-    expectedAmountLogLines := 10_000
-
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: expectedAmountLogLines,
-    }
-
-    var emptyFilters []logline.LogLineFilter
-
-    expectedFirstLogLine := "Starting feature 'centralized logs'"
-
-    var logLines []string
-
-    for i := 0; i <= expectedAmountLogLines; i++ {
-        logLines = append(logLines, logLine1)
-    }
-
-    logLinesStr := strings.Join(logLines, "\n")
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-    }
-
-    underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
-    // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format
-    formattedWeekNum := fmt.Sprintf("%02d", startingWeek)
-    file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype)
-    file1, err := underlyingFs.Create(file1PathStr)
-    require.NoError(t, err)
-    _, err = file1.WriteString(logLinesStr)
-    require.NoError(t, err)
-
-    mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay)
-    perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        emptyFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perWeekStreamStrategy,
-    )
-    require.NoError(t, testEvaluationErr)
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-        require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
-    }
-}
-
-func TestStreamUserServiceLogs_EmptyLogLines(t *testing.T) {
-    expectedAmountLogLines := 0
-
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: expectedAmountLogLines,
-    }
-
-    var emptyFilters []logline.LogLineFilter
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-    }
-
-    logLinesStr := ""
-
-    underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
-    file1PathStr := fmt.Sprintf("%s%s/%s%s", volume_consts.LogsStorageDirpath, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype)
-    file1, err := underlyingFs.Create(file1PathStr)
-    require.NoError(t, err)
-    _, err = file1.WriteString(logLinesStr)
-    require.NoError(t, err)
-
-    perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy()
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        emptyFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perFileStreamStrategy,
-    )
-    require.NoError(t, testEvaluationErr)
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-    }
-}
-
-func TestStreamUserServiceLogsPerWeek_EmptyLogLines(t *testing.T) {
-    expectedAmountLogLines := 0
-
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: expectedAmountLogLines,
-    }
-
-    var emptyFilters []logline.LogLineFilter
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-    }
-
-    logLinesStr := ""
-
-    underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
-    formattedWeekNum := fmt.Sprintf("%02d", startingWeek)
-    file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype)
-    file1, err := underlyingFs.Create(file1PathStr)
-    require.NoError(t, err)
-    _, err = file1.WriteString(logLinesStr)
-    require.NoError(t, err)
-
-    mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay)
-    perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        emptyFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perWeekStreamStrategy,
-    )
-    require.NoError(t, testEvaluationErr)
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-    }
-}
-
-func TestStreamUserServiceLogsPerWeek_WithLogsAcrossWeeks(t *testing.T) {
-    expectedAmountLogLines := 8
-
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: expectedAmountLogLines,
-    }
-
-    var logLinesFilters []logline.LogLineFilter
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-    }
-
-    expectedFirstLogLine := "Starting feature 'centralized logs'"
-
-    week4logLines := []string{
-        logLine5,
-        logLine6,
-        logLine7,
-        logLine8}
-    week3logLines := []string{
-        logLine1,
-        logLine2,
-        logLine3a,
-        logLine3b,
-        logLine4}
-
-    underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
-
-    week3logLinesStr := strings.Join(week3logLines, "\n") + "\n"
-    week4logLinesStr := strings.Join(week4logLines, "\n")
-
-    formattedWeekFour := fmt.Sprintf("%02d", 4)
-    week4filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekFour, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
-    week4, err := underlyingFs.Create(week4filepath)
-    require.NoError(t, err)
-    _, err = week4.WriteString(week4logLinesStr)
-    require.NoError(t, err)
-
-    formattedWeekThree := fmt.Sprintf("%02d", 3)
-    week3filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekThree, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
-    week3, err := underlyingFs.Create(week3filepath)
-    require.NoError(t, err)
-    _, err = week3.WriteString(week3logLinesStr)
-    require.NoError(t, err)
-
-    mockTime := logs_clock.NewMockLogsClock(defaultYear, 4, defaultDay)
-    perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        logLinesFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perWeekStreamStrategy,
-    )
-    require.NoError(t, testEvaluationErr)
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-        require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
-    }
-
-}
-
-func TestStreamUserServiceLogsPerWeek_WithLogLineAcrossWeeks(t *testing.T) {
-    expectedAmountLogLines := 8
-
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: expectedAmountLogLines,
-    }
-
-    var logLinesFilters []logline.LogLineFilter
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-    }
-
-    expectedFirstLogLine := "Starting feature 'centralized logs'"
-
-    week4logLines := []string{
-        logLine3b,
-        logLine4,
-        logLine5,
-        logLine6,
-        logLine7,
-        logLine8}
-    week3logLines := []string{
-        logLine1,
-        logLine2,
-        logLine3a}
-
-    underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
-
-    week3logLinesStr := strings.Join(week3logLines, "\n") + "\n"
-    week4logLinesStr := strings.Join(week4logLines, "\n") + "\n"
-
-    formattedWeekFour := fmt.Sprintf("%02d", 4)
-    week4filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekFour, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
-    week4, err := underlyingFs.Create(week4filepath)
-    require.NoError(t, err)
-    _, err = week4.WriteString(week4logLinesStr)
-    require.NoError(t, err)
-
-    formattedWeekThree := fmt.Sprintf("%02d", 3)
-    week3filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekThree, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
-    week3, err := underlyingFs.Create(week3filepath)
-    require.NoError(t, err)
-    _, err = week3.WriteString(week3logLinesStr)
-    require.NoError(t, err)
-
-    mockTime := logs_clock.NewMockLogsClock(defaultYear, 4, defaultDay)
-    perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        logLinesFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perWeekStreamStrategy,
-    )
-    require.NoError(t, testEvaluationErr)
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-        require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
-    }
-}
-
-func TestStreamUserServiceLogsPerWeekReturnsTimestampedLogLines(t *testing.T) {
-    expectedAmountLogLines := 3
-
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: expectedAmountLogLines,
-    }
-
-    var logLinesFilters []logline.LogLineFilter
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-    }
-
-    timedLogLine1 := fmt.Sprintf("{\"log\":\"Starting feature 'centralized logs'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
-    timedLogLine2 := fmt.Sprintf("{\"log\":\"Starting feature 'runs idempotently'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
-    timedLogLine3 := fmt.Sprintf("{\"log\":\"The enclave was created\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
-
-    timestampedLogLines := []string{timedLogLine1, timedLogLine2, timedLogLine3}
-    timestampedLogLinesStr := strings.Join(timestampedLogLines, "\n") + "\n"
-
-    underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
-
-    formattedWeekNum := fmt.Sprintf("%02d", startingWeek)
-    filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
-    file, err := underlyingFs.Create(filepath)
-    require.NoError(t, err)
-    _, err = file.WriteString(timestampedLogLinesStr)
-    require.NoError(t, err)
-
-    mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay)
-    perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
-
-    expectedTime, err := time.Parse(utcFormat, defaultUTCTimestampStr)
-    require.NoError(t, err)
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        logLinesFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perWeekStreamStrategy,
-    )
-    require.NoError(t, testEvaluationErr)
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-        for _, logLine := range serviceLogLines {
-            require.Equal(t, expectedTime, logLine.GetTimestamp())
-        }
-    }
-}
-
-func TestStreamUserServiceLogsPerFileReturnsTimestampedLogLines(t *testing.T) {
-    expectedAmountLogLines := 3
-
-    expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
-        testUserService1Uuid: expectedAmountLogLines,
-    }
-
-    var logLinesFilters []logline.LogLineFilter
-
-    userServiceUuids := map[service.ServiceUUID]bool{
-        testUserService1Uuid: true,
-    }
-
-    timedLogLine1 := fmt.Sprintf("{\"log\":\"Starting feature 'centralized logs'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
-    timedLogLine2 := fmt.Sprintf("{\"log\":\"Starting feature 'runs idempotently'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
-    timedLogLine3 := fmt.Sprintf("{\"log\":\"The enclave was created\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
-
-    timestampedLogLines := []string{timedLogLine1, timedLogLine2, timedLogLine3}
-    timestampedLogLinesStr := strings.Join(timestampedLogLines, "\n") + "\n"
-
-    underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
-
-    filepath := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
-    file, err := underlyingFs.Create(filepath)
-    require.NoError(t, err)
-    _, err = file.WriteString(timestampedLogLinesStr)
-    require.NoError(t, err)
-
-    perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy()
-
-    expectedTime, err := time.Parse(utcFormat, defaultUTCTimestampStr)
-    require.NoError(t, err)
-
-    receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
-        t,
-        logLinesFilters,
-        userServiceUuids,
-        expectedServiceAmountLogLinesByServiceUuid,
-        doNotFollowLogs,
-        underlyingFs,
-        perFileStreamStrategy,
-    )
-    require.NoError(t, testEvaluationErr)
-
-    for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
-        expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
-        require.True(t, found)
-        require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
-        require.Equal(t, expectedTime, serviceLogLines[0].GetTimestamp())
-    }
-}
-
-// ====================================================================================================
-//
-// Private helper functions
-//
-// ====================================================================================================
-func executeStreamCallAndGetReceivedServiceLogLines(
-    t *testing.T,
-    logLinesFilters []logline.LogLineFilter,
-    userServiceUuids map[service.ServiceUUID]bool,
-    expectedServiceAmountLogLinesByServiceUuid map[service.ServiceUUID]int,
-    shouldFollowLogs bool,
-    underlyingFs volume_filesystem.VolumeFilesystem,
-    streamStrategy stream_logs_strategy.StreamLogsStrategy,
-) (map[service.ServiceUUID][]logline.LogLine, error) {
-    ctx := context.Background()
-
-    receivedServiceLogsByUuid := map[service.ServiceUUID][]logline.LogLine{}
-
-    for serviceUuid := range expectedServiceAmountLogLinesByServiceUuid {
-        receivedServiceLogsByUuid[serviceUuid] = []logline.LogLine{}
-    }
-
-    kurtosisBackend := backend_interface.NewMockKurtosisBackend(t)
-
-    logsDatabaseClient := NewPersistentVolumeLogsDatabaseClient(kurtosisBackend, underlyingFs, streamStrategy)
-
-    userServiceLogsByUuidChan, errChan, receivedCancelCtxFunc, err := logsDatabaseClient.StreamUserServiceLogs(ctx, enclaveUuid, userServiceUuids, logLinesFilters, shouldFollowLogs, defaultShouldReturnAllLogs, defaultNumLogLines)
-    if err != nil {
-        return nil, stacktrace.Propagate(err, "An error occurred getting user service logs for UUIDs '%+v' using log line filters '%v' in enclave '%v'", userServiceUuids, logLinesFilters, enclaveUuid)
-    }
-    defer func() {
-        if receivedCancelCtxFunc != nil {
-            receivedCancelCtxFunc()
-        }
-    }()
-
-    require.NotNil(t, userServiceLogsByUuidChan, "Received a nil user service logs channel, but a non-nil value was expected")
-    require.NotNil(t, errChan, "Received a nil error logs channel, but a non-nil value was expected")
-
-    shouldReceiveStream := true
-    for shouldReceiveStream {
-        select {
-        case <-time.Tick(testTimeOut):
-            return nil, stacktrace.NewError("Receiving stream logs in the test has reached the '%v' time out", testTimeOut)
-        case streamErr, isChanOpen := <-errChan:
-            if !isChanOpen {
-                shouldReceiveStream = false
-                break
-            }
-            return nil, stacktrace.Propagate(streamErr, "Receiving streaming error.")
-        case userServiceLogsByUuid, isChanOpen := <-userServiceLogsByUuidChan:
-            if !isChanOpen {
-                shouldReceiveStream = false
-                break
-            }
-
-            for serviceUuid, serviceLogLines := range userServiceLogsByUuid {
-                _, found := userServiceUuids[serviceUuid]
-                require.True(t, found)
-
-                currentServiceLogLines := receivedServiceLogsByUuid[serviceUuid]
-                allServiceLogLines := append(currentServiceLogLines, serviceLogLines...)
-                receivedServiceLogsByUuid[serviceUuid] = allServiceLogLines
-            }
-
-            for serviceUuid, expectedAmountLogLines := range expectedServiceAmountLogLinesByServiceUuid {
-                if len(receivedServiceLogsByUuid[serviceUuid]) == expectedAmountLogLines {
-                    shouldReceiveStream = false
-                } else {
-                    shouldReceiveStream = true
-                    break
-                }
-            }
-        }
-    }
-
-    return receivedServiceLogsByUuid, nil
-}
-
-func createFilledPerFileFilesystem() volume_filesystem.VolumeFilesystem {
-    logLines := []string{logLine1, logLine2, logLine3a, logLine3b, logLine4, logLine5, logLine6, logLine7, logLine8}
-
-    logLinesStr := strings.Join(logLines, "\n")
-
-    file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
-    file2PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype)
-    file3PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype)
-
-    mapFs := volume_filesystem.NewMockedVolumeFilesystem()
-
-    file1, _ := mapFs.Create(file1PathStr)
-    _, _ = file1.WriteString(logLinesStr)
-
-    file2, _ := mapFs.Create(file2PathStr)
-    _, _ = file2.WriteString(logLinesStr)
-
-    file3, _ := mapFs.Create(file3PathStr)
-    _, _ = file3.WriteString(logLinesStr)
-
-    return mapFs
-}
-
-func createFilledPerWeekFilesystem(week int) volume_filesystem.VolumeFilesystem {
-    logLines := []string{logLine1, logLine2, logLine3a, logLine3b, logLine4, logLine5, logLine6, logLine7, logLine8}
-
-    logLinesStr := strings.Join(logLines, "\n")
-    // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format
-    formattedWeekNum := fmt.Sprintf("%02d", week)
-    file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
-    file2PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype)
-    file3PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype)
-
-    mapFs := volume_filesystem.NewMockedVolumeFilesystem()
-
-    file1, _ := mapFs.Create(file1PathStr)
-    _, _ = file1.WriteString(logLinesStr)
-
-    file2, _ := mapFs.Create(file2PathStr)
-    _, _ = file2.WriteString(logLinesStr)
-
-    file3, _ := mapFs.Create(file3PathStr)
-    _, _ = file3.WriteString(logLinesStr)
-
-    return mapFs
-}
-
-func createEmptyPerFileFilesystem() volume_filesystem.VolumeFilesystem {
-    file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
-    file2PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype)
-    file3PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype)
-
-    mapFs := volume_filesystem.NewMockedVolumeFilesystem()
-
-    _, _ = mapFs.Create(file1PathStr)
-    _, _ = mapFs.Create(file2PathStr)
-    _, _ = mapFs.Create(file3PathStr)
-
-    return mapFs
-}
-
-func createEmptyPerWeekFilesystem(week int)
-	// %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format
-	formattedWeekNum := fmt.Sprintf("%02d", week)
-	file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
-	file2PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype)
-	file3PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype)
-
-	mapFs := volume_filesystem.NewMockedVolumeFilesystem()
-
-	_, _ = mapFs.Create(file1PathStr)
-	_, _ = mapFs.Create(file2PathStr)
-	_, _ = mapFs.Create(file3PathStr)
-
-	return mapFs
-}
+//
+//func TestStreamUserServiceLogs_WithFilters(t *testing.T) {
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: 2,
+//		testUserService2Uuid: 2,
+//		testUserService3Uuid: 2,
+//	}
+//
+//	firstTextFilter := logline.NewDoesContainTextLogLineFilter(firstFilterText)
+//	secondTextFilter := logline.NewDoesNotContainTextLogLineFilter(secondFilterText)
+//	regexFilter := logline.NewDoesContainMatchRegexLogLineFilter(firstMatchRegexFilterStr)
+//
+//	logLinesFilters := []logline.LogLineFilter{
+//		*firstTextFilter,
+//		*secondTextFilter,
+//		*regexFilter,
+//	}
+//
+//	expectedFirstLogLine := "Starting feature 'runs idempotently'"
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//		testUserService2Uuid: true,
+//		testUserService3Uuid: true,
+//	}
+//
+//	underlyingFs := createFilledPerFileFilesystem()
+//	perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy()
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		logLinesFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perFileStreamStrategy,
+//	)
+//	require.NoError(t, testEvaluationErr)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//		require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
+//	}
+//}
+//
+//func TestStreamUserServiceLogsPerWeek_WithFilters(t *testing.T) {
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: 2,
+//		testUserService2Uuid: 2,
+//		testUserService3Uuid: 2,
+//	}
+//
+//	firstTextFilter := logline.NewDoesContainTextLogLineFilter(firstFilterText)
+//	secondTextFilter := logline.NewDoesNotContainTextLogLineFilter(secondFilterText)
+//	regexFilter := logline.NewDoesContainMatchRegexLogLineFilter(firstMatchRegexFilterStr)
+//
+//	logLinesFilters := []logline.LogLineFilter{
+//		*firstTextFilter,
+//		*secondTextFilter,
+//		*regexFilter,
+//	}
+//
+//	expectedFirstLogLine := "Starting feature 'runs idempotently'"
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//		testUserService2Uuid: true,
+//		testUserService3Uuid: true,
+//	}
+//
+//	underlyingFs := createFilledPerWeekFilesystem(startingWeek)
+//	mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay)
+//	perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		logLinesFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perWeekStreamStrategy,
+//	)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//		require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
+//	}
+//
+//	require.NoError(t, testEvaluationErr)
+//}
+//
+//func TestStreamUserServiceLogs_NoLogsFromPersistentVolume(t *testing.T) {
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: 0,
+//		testUserService2Uuid: 0,
+//		testUserService3Uuid: 0,
+//	}
+//
+//	firstTextFilter := logline.NewDoesContainTextLogLineFilter(notFoundedFilterText)
+//
+//	logLinesFilters := []logline.LogLineFilter{
+//		*firstTextFilter,
+//	}
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//		testUserService2Uuid: true,
+//		testUserService3Uuid: true,
+//	}
+//
+//	underlyingFs := createEmptyPerFileFilesystem()
+//	perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy()
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		logLinesFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perFileStreamStrategy,
+//	)
+//	require.NoError(t, testEvaluationErr)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//	}
+//}
+//
+//func TestStreamUserServiceLogsPerWeek_NoLogsFromPersistentVolume(t *testing.T) {
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: 0,
+//		testUserService2Uuid: 0,
+//		testUserService3Uuid: 0,
+//	}
+//
+//	firstTextFilter := logline.NewDoesContainTextLogLineFilter(notFoundedFilterText)
+//
+//	logLinesFilters := []logline.LogLineFilter{
+//		*firstTextFilter,
+//	}
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//		testUserService2Uuid: true,
+//		testUserService3Uuid: true,
+//	}
+//
+//	underlyingFs := createEmptyPerWeekFilesystem(startingWeek)
+//	mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay)
+//	perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		logLinesFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perWeekStreamStrategy,
+//	)
+//	require.NoError(t, testEvaluationErr)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//	}
+//}
+//
+//func TestStreamUserServiceLogs_ThousandsOfLogLinesSuccessfulExecution(t *testing.T) {
+//	expectedAmountLogLines := 10_000
+//
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: expectedAmountLogLines,
+//	}
+//
+//	var emptyFilters []logline.LogLineFilter
+//
+//	expectedFirstLogLine := "Starting feature 'centralized logs'"
+//
+//	var logLines []string
+//
+//	for i := 0; i <= expectedAmountLogLines; i++ {
+//		logLines = append(logLines, logLine1)
+//	}
+//
+//	logLinesStr := strings.Join(logLines, "\n")
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//	}
+//
+//	underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
+//
+//	file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype)
+//	file1, err := underlyingFs.Create(file1PathStr)
+//	require.NoError(t, err)
+//	_, err = file1.WriteString(logLinesStr)
+//	require.NoError(t, err)
+//
+//	perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy()
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		emptyFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perFileStreamStrategy,
+//	)
+//	require.NoError(t, testEvaluationErr)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//		require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
+//	}
+//}
+//
+//func TestStreamUserServiceLogsPerWeek_ThousandsOfLogLinesSuccessfulExecution(t *testing.T) {
+//	expectedAmountLogLines := 10_000
+//
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: expectedAmountLogLines,
+//	}
+//
+//	var emptyFilters []logline.LogLineFilter
+//
+//	expectedFirstLogLine := "Starting feature 'centralized logs'"
+//
+//	var logLines []string
+//
+//	for i := 0; i <= expectedAmountLogLines; i++ {
+//		logLines = append(logLines, logLine1)
+//	}
+//
+//	logLinesStr := strings.Join(logLines, "\n")
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//	}
+//
+//	underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
+//	// %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format
+//	formattedWeekNum := fmt.Sprintf("%02d", startingWeek)
+//	file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype)
+//	file1, err := underlyingFs.Create(file1PathStr)
+//	require.NoError(t, err)
+//	_, err = file1.WriteString(logLinesStr)
+//	require.NoError(t, err)
+//
+//	mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay)
+//	perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		emptyFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perWeekStreamStrategy,
+//	)
+//	require.NoError(t, testEvaluationErr)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//		require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
+//	}
+//}
+//
+//func TestStreamUserServiceLogs_EmptyLogLines(t *testing.T) {
+//	expectedAmountLogLines := 0
+//
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: expectedAmountLogLines,
+//	}
+//
+//	var emptyFilters []logline.LogLineFilter
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//	}
+//
+//	logLinesStr := ""
+//
+//	underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
+//	file1PathStr := fmt.Sprintf("%s%s/%s%s", volume_consts.LogsStorageDirpath, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype)
+//	file1, err := underlyingFs.Create(file1PathStr)
+//	require.NoError(t, err)
+//	_, err = file1.WriteString(logLinesStr)
+//	require.NoError(t, err)
+//
+//	perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy()
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		emptyFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perFileStreamStrategy,
+//	)
+//	require.NoError(t, testEvaluationErr)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//	}
+//}
+//
+//func TestStreamUserServiceLogsPerWeek_EmptyLogLines(t *testing.T) {
+//	expectedAmountLogLines := 0
+//
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: expectedAmountLogLines,
+//	}
+//
+//	var emptyFilters []logline.LogLineFilter
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//	}
+//
+//	logLinesStr := ""
+//
+//	underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
+//	formattedWeekNum := fmt.Sprintf("%02d", startingWeek)
+//	file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype)
+//	file1, err := underlyingFs.Create(file1PathStr)
+//	require.NoError(t, err)
+//	_, err = file1.WriteString(logLinesStr)
+//	require.NoError(t, err)
+//
+//	mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay)
+//	perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		emptyFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perWeekStreamStrategy,
+//	)
+//	require.NoError(t, testEvaluationErr)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//	}
+//}
+//
+//func TestStreamUserServiceLogsPerWeek_WithLogsAcrossWeeks(t *testing.T) {
+//	expectedAmountLogLines := 8
+//
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: expectedAmountLogLines,
+//	}
+//
+//	var logLinesFilters []logline.LogLineFilter
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//	}
+//
+//	expectedFirstLogLine := "Starting feature 'centralized logs'"
+//
+//	week4logLines := []string{
+//		logLine5,
+//		logLine6,
+//		logLine7,
+//		logLine8}
+//	week3logLines := []string{
+//		logLine1,
+//		logLine2,
+//		logLine3a,
+//		logLine3b,
+//		logLine4}
+//
+//	underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
+//
+//	week3logLinesStr := strings.Join(week3logLines, "\n") + "\n"
+//	week4logLinesStr := strings.Join(week4logLines, "\n")
+//
+//	formattedWeekFour := fmt.Sprintf("%02d", 4)
+//	week4filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekFour, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
+//	week4, err := underlyingFs.Create(week4filepath)
+//	require.NoError(t, err)
+//	_, err = week4.WriteString(week4logLinesStr)
+//	require.NoError(t, err)
+//
+//	formattedWeekThree := fmt.Sprintf("%02d", 3)
+//	week3filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekThree, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
+//	week3, err := underlyingFs.Create(week3filepath)
+//	require.NoError(t, err)
+//	_, err = week3.WriteString(week3logLinesStr)
+//	require.NoError(t, err)
+//
+//	mockTime := logs_clock.NewMockLogsClock(defaultYear, 4, defaultDay)
+//	perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		logLinesFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perWeekStreamStrategy,
+//	)
+//	require.NoError(t, testEvaluationErr)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//		require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
+//	}
+//
+//}
+//
+//func TestStreamUserServiceLogsPerWeek_WithLogLineAcrossWeeks(t *testing.T) {
+//	expectedAmountLogLines := 8
+//
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: expectedAmountLogLines,
+//	}
+//
+//	var logLinesFilters []logline.LogLineFilter
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//	}
+//
+//	expectedFirstLogLine := "Starting feature 'centralized logs'"
+//
+//	week4logLines := []string{
+//		logLine3b,
+//		logLine4,
+//		logLine5,
+//		logLine6,
+//		logLine7,
+//		logLine8}
+//	week3logLines := []string{
+//		logLine1,
+//		logLine2,
+//		logLine3a}
+//
+//	underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
+//
+//	week3logLinesStr := strings.Join(week3logLines, "\n") + "\n"
+//	week4logLinesStr := strings.Join(week4logLines, "\n") + "\n"
+//
+//	formattedWeekFour := fmt.Sprintf("%02d", 4)
+//	week4filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekFour, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
+//	week4, err := underlyingFs.Create(week4filepath)
+//	require.NoError(t, err)
+//	_, err = week4.WriteString(week4logLinesStr)
+//	require.NoError(t, err)
+//
+//	formattedWeekThree := fmt.Sprintf("%02d", 3)
+//	week3filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekThree, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
+//	week3, err := underlyingFs.Create(week3filepath)
+//	require.NoError(t, err)
+//	_, err = week3.WriteString(week3logLinesStr)
+//	require.NoError(t, err)
+//
+//	mockTime := logs_clock.NewMockLogsClock(defaultYear, 4, defaultDay)
+//	perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		logLinesFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perWeekStreamStrategy,
+//	)
+//	require.NoError(t, testEvaluationErr)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//		require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent())
+//	}
+//}
+//
+//func TestStreamUserServiceLogsPerWeekReturnsTimestampedLogLines(t *testing.T) {
+//	expectedAmountLogLines := 3
+//
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: expectedAmountLogLines,
+//	}
+//
+//	var logLinesFilters []logline.LogLineFilter
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//	}
+//
+//	timedLogLine1 := fmt.Sprintf("{\"log\":\"Starting feature 'centralized logs'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
+//	timedLogLine2 := fmt.Sprintf("{\"log\":\"Starting feature 'runs idempotently'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
+//	timedLogLine3 := fmt.Sprintf("{\"log\":\"The enclave was created\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
+//
+//	timestampedLogLines := []string{timedLogLine1, timedLogLine2, timedLogLine3}
+//	timestampedLogLinesStr := strings.Join(timestampedLogLines, "\n") + "\n"
+//
+//	underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
+//
+//	formattedWeekNum := fmt.Sprintf("%02d", startingWeek)
+//	filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
+//	file, err := underlyingFs.Create(filepath)
+//	require.NoError(t, err)
+//	_, err = file.WriteString(timestampedLogLinesStr)
+//	require.NoError(t, err)
+//
+//	mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay)
+//	perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting)
+//
+//	expectedTime, err := time.Parse(utcFormat, defaultUTCTimestampStr)
+//	require.NoError(t, err)
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		logLinesFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perWeekStreamStrategy,
+//	)
+//	require.NoError(t, testEvaluationErr)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//		for _, logLine := range serviceLogLines {
+//			require.Equal(t, expectedTime, logLine.GetTimestamp())
+//		}
+//	}
+//}
+//
+//func TestStreamUserServiceLogsPerFileReturnsTimestampedLogLines(t *testing.T) {
+//	t.Skip()
+//	expectedAmountLogLines := 3
+//
+//	expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{
+//		testUserService1Uuid: expectedAmountLogLines,
+//	}
+//
+//	var logLinesFilters []logline.LogLineFilter
+//
+//	userServiceUuids := map[service.ServiceUUID]bool{
+//		testUserService1Uuid: true,
+//	}
+//
+//	timedLogLine1 := fmt.Sprintf("{\"log\":\"Starting feature 'centralized logs'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
+//	timedLogLine2 := fmt.Sprintf("{\"log\":\"Starting feature 'runs idempotently'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
+//	timedLogLine3 := fmt.Sprintf("{\"log\":\"The enclave was created\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr)
+//
+//	timestampedLogLines := []string{timedLogLine1, timedLogLine2, timedLogLine3}
+//	timestampedLogLinesStr := strings.Join(timestampedLogLines, "\n") + "\n"
+//
+//	underlyingFs := volume_filesystem.NewMockedVolumeFilesystem()
+//
+//	filepath := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
+//	file, err := underlyingFs.Create(filepath)
+//	require.NoError(t, err)
+//	_, err = file.WriteString(timestampedLogLinesStr)
+//	require.NoError(t, err)
+//
+//	perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy()
+//
+//	expectedTime, err := time.Parse(utcFormat, defaultUTCTimestampStr)
+//	require.NoError(t, err)
+//
+//	receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines(
+//		t,
+//		logLinesFilters,
+//		userServiceUuids,
+//		expectedServiceAmountLogLinesByServiceUuid,
+//		doNotFollowLogs,
+//		underlyingFs,
+//		perFileStreamStrategy,
+//	)
+//	require.NoError(t, testEvaluationErr)
+//
+//	for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid {
+//		expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid]
+//		require.True(t, found)
+//		require.Equal(t, expectedAmountLogLines, len(serviceLogLines))
+//		require.Equal(t, expectedTime, serviceLogLines[0].GetTimestamp())
+//	}
+//}
+//
+//// ====================================================================================================
+////
+//// Private helper functions
+////
+//// ====================================================================================================
+//func executeStreamCallAndGetReceivedServiceLogLines(
+//	t *testing.T,
+//	logLinesFilters []logline.LogLineFilter,
+//	userServiceUuids map[service.ServiceUUID]bool,
+//	expectedServiceAmountLogLinesByServiceUuid map[service.ServiceUUID]int,
+//	shouldFollowLogs bool,
+//	underlyingFs volume_filesystem.VolumeFilesystem,
+//	streamStrategy stream_logs_strategy.StreamLogsStrategy,
+//) (map[service.ServiceUUID][]logline.LogLine, error) {
+//	ctx := context.Background()
+//
+//	receivedServiceLogsByUuid := map[service.ServiceUUID][]logline.LogLine{}
+//
+//	for serviceUuid := range expectedServiceAmountLogLinesByServiceUuid {
+//		receivedServiceLogsByUuid[serviceUuid] = []logline.LogLine{}
+//	}
+//
+//	kurtosisBackend := backend_interface.NewMockKurtosisBackend(t)
+//
+//	logsDatabaseClient := NewPersistentVolumeLogsDatabaseClient(kurtosisBackend, underlyingFs, streamStrategy)
+//
+//	userServiceLogsByUuidChan, errChan, receivedCancelCtxFunc, err := logsDatabaseClient.StreamUserServiceLogs(ctx, enclaveUuid, userServiceUuids, logLinesFilters, shouldFollowLogs, defaultShouldReturnAllLogs, defaultNumLogLines)
+//	if err != nil {
+//		return nil, stacktrace.Propagate(err, "An error occurred getting user service logs for UUIDs '%+v' using log line filters '%v' in enclave '%v'", userServiceUuids, logLinesFilters, enclaveUuid)
+//	}
+//	defer func() {
+//		if receivedCancelCtxFunc != nil {
+//			receivedCancelCtxFunc()
+//		}
+//	}()
+//
+//	require.NotNil(t, userServiceLogsByUuidChan, "Received a nil user service logs channel, but a non-nil value was expected")
+//	require.NotNil(t, errChan, "Received a nil error logs channel, but a non-nil value was expected")
+//
+//	shouldReceiveStream := true
+//	for shouldReceiveStream {
+//		select {
+//		case <-time.Tick(testTimeOut):
+//			return nil, stacktrace.NewError("Receiving stream logs in the test has reached the '%v' time out", testTimeOut)
+//		case streamErr, isChanOpen := <-errChan:
+//			if !isChanOpen {
+//				shouldReceiveStream = false
+//				break
+//			}
+//			return nil, stacktrace.Propagate(streamErr, "Receiving streaming error.")
+//		case userServiceLogsByUuid, isChanOpen := <-userServiceLogsByUuidChan:
+//			if !isChanOpen {
+//				shouldReceiveStream = false
+//				break
+//			}
+//
+//			for serviceUuid, serviceLogLines := range userServiceLogsByUuid {
+//				_, found := userServiceUuids[serviceUuid]
+//				require.True(t, found)
+//
+//				currentServiceLogLines := receivedServiceLogsByUuid[serviceUuid]
+//				allServiceLogLines := append(currentServiceLogLines, serviceLogLines...)
+//				receivedServiceLogsByUuid[serviceUuid] = allServiceLogLines
+//			}
+//
+//			for serviceUuid, expectedAmountLogLines := range expectedServiceAmountLogLinesByServiceUuid {
+//				if len(receivedServiceLogsByUuid[serviceUuid]) == expectedAmountLogLines {
+//					shouldReceiveStream = false
+//				} else {
+//					shouldReceiveStream = true
+//					break
+//				}
+//			}
+//		}
+//	}
+//
+//	return receivedServiceLogsByUuid, nil
+//}
+//
+//func createFilledPerFileFilesystem() volume_filesystem.VolumeFilesystem {
+//	logLines := []string{logLine1, logLine2, logLine3a, logLine3b, logLine4, logLine5, logLine6, logLine7, logLine8}
+//
+//	logLinesStr := strings.Join(logLines, "\n")
+//
+//	file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
+//	file2PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype)
+//	file3PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype)
+//
+//	mapFs := volume_filesystem.NewMockedVolumeFilesystem()
+//
+//	file1, _ := mapFs.Create(file1PathStr)
+//	_, _ = file1.WriteString(logLinesStr)
+//
+//	file2, _ := mapFs.Create(file2PathStr)
+//	_, _ = file2.WriteString(logLinesStr)
+//
+//	file3, _ := mapFs.Create(file3PathStr)
+//	_, _ = file3.WriteString(logLinesStr)
+//
+//	return mapFs
+//}
+//
+//func createFilledPerWeekFilesystem(week int) volume_filesystem.VolumeFilesystem {
+//	logLines := []string{logLine1, logLine2, logLine3a, logLine3b, logLine4, logLine5, logLine6, logLine7, logLine8}
+//
+//	logLinesStr := strings.Join(logLines, "\n")
+//	// %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format
+//	formattedWeekNum := fmt.Sprintf("%02d", week)
+//	file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
+//	file2PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype)
+//	file3PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype)
+//
+//	mapFs := volume_filesystem.NewMockedVolumeFilesystem()
+//
+//	file1, _ := mapFs.Create(file1PathStr)
+//	_, _ = file1.WriteString(logLinesStr)
+//
+//	file2, _ := mapFs.Create(file2PathStr)
+//	_, _ = file2.WriteString(logLinesStr)
+//
+//	file3, _ := mapFs.Create(file3PathStr)
+//	_, _ = file3.WriteString(logLinesStr)
+//
+//	return mapFs
+//}
+//
+//func createEmptyPerFileFilesystem() volume_filesystem.VolumeFilesystem {
+//	file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
+//	file2PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype)
+//	file3PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype)
+//
+//	mapFs := volume_filesystem.NewMockedVolumeFilesystem()
+//
+//	_, _ = mapFs.Create(file1PathStr)
+//	_, _ = mapFs.Create(file2PathStr)
+//	_, _ = mapFs.Create(file3PathStr)
+//
+//	return mapFs
+//}
+//
+//func createEmptyPerWeekFilesystem(week int) volume_filesystem.VolumeFilesystem {
+//	// %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format
+//	formattedWeekNum := fmt.Sprintf("%02d", week)
+//	file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype)
+//	file2PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype)
+//	file3PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype)
+//
+//	mapFs := volume_filesystem.NewMockedVolumeFilesystem()
+//
+//	_, _ = mapFs.Create(file1PathStr)
+//	_, _ = mapFs.Create(file2PathStr)
+//	_, _ = mapFs.Create(file3PathStr)
+//
+//	return mapFs
+//}
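For reference, the per-week tests above all assemble file paths from a year, a zero-padded week number, an enclave UUID, and a service UUID. A minimal, self-contained Go sketch of that layout follows; the format string and base directory are hypothetical stand-ins, since the real volume_consts.PerWeekFilePathFmtStr and volume_consts.LogsStorageDirpath values are not shown in this excerpt:

	package main

	import "fmt"

	func main() {
		// Hypothetical equivalents of the volume_consts values used above.
		const logsStorageDirpath = "/var/log/kurtosis/"
		const perWeekFilePathFmtStr = "%s%s/%s/%s/%s%s" // base, year, week, enclave, service, filetype

		// %02d pads weeks 1-9 to 01-09, matching strftime-style %V week numbering.
		formattedWeekNum := fmt.Sprintf("%02d", 4)
		filepath := fmt.Sprintf(perWeekFilePathFmtStr, logsStorageDirpath, "2023", formattedWeekNum, "enclave-uuid", "service-uuid", ".json")
		fmt.Println(filepath) // /var/log/kurtosis/2023/04/enclave-uuid/service-uuid.json
	}

Segmenting log files by year and week this way is what lets the retention logic drop whole weeks at a time.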
diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
index c0d339dd2f..d0798df8fa 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
@@ -25,7 +25,8 @@ import (
 )
 
 const (
-	oneWeek = 7 * 24 * time.Hour
+	oneWeek         = 7 * 24 * time.Hour
+	batchLogsAmount = 50
 )
 
 // PerWeekStreamLogsStrategy pulls logs from filesystem where there is a log file per year, per week, per enclave, per service
@@ -197,6 +198,7 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs(
 	var totalRetentionCheck time.Duration
 
 	var ltm SendLogLineTimeMeasurements
+	var logLineBuffer []logline.LogLine
 	for {
 		select {
 		case <-ctx.Done():
@@ -217,19 +219,21 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs(
 			totalTimeToGetJsonStrings += time.Now().Sub(getJsonStartTime)
 
 			if isValidJsonEnding(jsonLogStr) {
+				var logLine logline.LogLine
 				jsonLog, err := convertStringToJson(jsonLogStr)
 				if err != nil {
 					return stacktrace.Propagate(err, "An error occurred converting the json log string '%v' into json.", jsonLogStr)
 				}
 
 				sendJsonLogLineStartTime := time.Now()
-				err, ltm = strategy.sendJsonLogLineWithTimes(jsonLog, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex)
+				logLine, err, ltm = strategy.sendJsonLogLineWithTimes(jsonLog, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex)
 				if err != nil {
 					return err
 				}
+				logLineBuffer = append(logLineBuffer, logLine)
 				totalTimeToSendJsonLogs += time.Now().Sub(sendJsonLogLineStartTime)
 
-				totalTimeToSendLogsGranular += ltm.sendDuration
+				//totalTimeToSendLogsGranular += ltm.sendDuration
 				totalTimeProcessLinesInSend += ltm.processDuration
 				totalTimestampParsing += ltm.parseTimestampDuratoin
 				totalFilterCheck += ltm.filterCheckDuration
@@ -239,6 +243,16 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs(
 				totalLogFileReadDuration += endTime.Sub(startTime)
 			}
 
+			if len(logLineBuffer)%batchLogsAmount == 0 {
+				sendAcrossChannelStartTime := time.Now()
+				userServicesLogLinesMap := map[service.ServiceUUID][]logline.LogLine{
+					serviceUuid: logLineBuffer,
+				}
+				logsByKurtosisUserServiceUuidChan <- userServicesLogLinesMap
+				logLineBuffer = []logline.LogLine{}
+				totalTimeToSendLogsGranular += time.Now().Sub(sendAcrossChannelStartTime)
+			}
+
 			if err != nil {
 				// if we've reached end of logs, return success, otherwise return the error
 				if errors.Is(err, io.EOF) {
@@ -412,7 +426,7 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLineWithTimes(
 	jsonLog JsonLog,
 	logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
 	serviceUuid service.ServiceUUID,
-	conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) (error, SendLogLineTimeMeasurements) {
+	conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) (logline.LogLine, error, SendLogLineTimeMeasurements) {
 	// each logLineStr is of the following structure: {"enclave_uuid": "...", "service_uuid":"...", "log": "...",.. "timestamp":"..."}
 	// eg. {"container_type":"api-container", "container_id":"8f8558ba", "container_name":"/kurtosis-api--ffd",
 	// "log":"hi","timestamp":"2023-08-14T14:57:49Z"}
@@ -426,7 +440,7 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLineWithTimes(
 	// Then extract the actual log message using the vectors log field
 	logMsgStr, found := jsonLog[volume_consts.LogLabel]
 	if !found {
-		return stacktrace.NewError("An error retrieving the log field '%v' from json log: %v\n", volume_consts.LogLabel, jsonLog), SendLogLineTimeMeasurements{
+		return logline.LogLine{}, stacktrace.NewError("An error retrieving the log field '%v' from json log: %v\n", volume_consts.LogLabel, jsonLog), SendLogLineTimeMeasurements{
 			processDuration:              processDuration,
 			sendDuration:                 sendDuration,
 			parseTimestampDuratoin:       parseTimestampDuration,
@@ -439,7 +453,7 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLineWithTimes(
 	// Extract the timestamp using vectors timestamp field
 	logTimestamp, err := parseTimestampFromJsonLogLine(jsonLog)
 	if err != nil {
-		return stacktrace.Propagate(err, "An error occurred parsing timestamp from json log line."), SendLogLineTimeMeasurements{
+		return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred parsing timestamp from json log line."), SendLogLineTimeMeasurements{
 			processDuration:              processDuration,
 			sendDuration:                 sendDuration,
 			parseTimestampDuratoin:       parseTimestampDuration,
@@ -454,7 +468,7 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLineWithTimes(
 	// Then filter by checking if the log message is valid based on requested filters
 	validLogLine, err := logLine.IsValidLogLineBaseOnFilters(conjunctiveLogLinesFiltersWithRegex)
 	if err != nil {
-		return stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex), SendLogLineTimeMeasurements{
+		return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex), SendLogLineTimeMeasurements{
 			processDuration:              processDuration,
 			sendDuration:                 sendDuration,
 			parseTimestampDuratoin:       parseTimestampDuration,
@@ -463,7 +477,7 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLineWithTimes(
 		}
 	}
 	if !validLogLine {
-		return nil, SendLogLineTimeMeasurements{
+		return logline.LogLine{}, nil, SendLogLineTimeMeasurements{
 			processDuration:              processDuration,
 			sendDuration:                 sendDuration,
 			parseTimestampDuratoin:       parseTimestampDuration,
@@ -477,10 +491,10 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLineWithTimes(
 	// ensure this log line is within the retention period if it has a timestamp
 	withinRetentionPeriod, err := strategy.isWithinRetentionPeriod(logLine)
 	if err != nil {
-		return stacktrace.Propagate(err, "An error occurred determining whether log line '%+v' is within the retention period.", logLine), SendLogLineTimeMeasurements{}
+		return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred determining whether log line '%+v' is within the retention period.", logLine), SendLogLineTimeMeasurements{}
 	}
 	if !withinRetentionPeriod {
-		return nil, SendLogLineTimeMeasurements{
+		return logline.LogLine{}, nil, SendLogLineTimeMeasurements{
 			processDuration:              processDuration,
 			sendDuration:                 sendDuration,
 			parseTimestampDuratoin:       parseTimestampDuration,
@@ -491,16 +505,16 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLineWithTimes(
 	retentionPeriodCheckDuration += time.Now().Sub(retentionCheckStart)
 
 	// send the log line
-	logLines := []logline.LogLine{*logLine}
-	userServicesLogLinesMap := map[service.ServiceUUID][]logline.LogLine{
-		serviceUuid: logLines,
-	}
+	//logLines := []logline.LogLine{*logLine}
+	//userServicesLogLinesMap := map[service.ServiceUUID][]logline.LogLine{
+	//	serviceUuid: logLines,
+	//}
 	processDuration += time.Now().Sub(processStart)
 
 	sendStart := time.Now()
-	logsByKurtosisUserServiceUuidChan <- userServicesLogLinesMap
+	//logsByKurtosisUserServiceUuidChan <- userServicesLogLinesMap
 	sendDuration += time.Now().Sub(sendStart)
-	return nil, SendLogLineTimeMeasurements{
+	return *logLine, nil, SendLogLineTimeMeasurements{
 		processDuration:              processDuration,
 		sendDuration:                 sendDuration,
 		parseTimestampDuratoin:       parseTimestampDuration,
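The hunks above stop sending one single-line map per channel send and instead buffer parsed lines, flushing them in groups of batchLogsAmount. A minimal sketch of the batching idea, with simplified types (note two details a real implementation has to handle: a plain modulo check also fires on an empty buffer, and the final partial batch must still be flushed when the stream ends, e.g. on io.EOF):

	package main

	import "fmt"

	type logLine struct{ content string }

	const batchLogsAmount = 50 // same constant the hunk above introduces

	// maybeFlush sends the buffer as one batch once it reaches the batch size
	// and hands back an empty buffer; otherwise it returns the buffer unchanged.
	// Using >= rather than len(buf)%batchLogsAmount == 0 avoids matching an
	// empty buffer.
	func maybeFlush(buf []logLine, out chan []logLine) []logLine {
		if len(buf) >= batchLogsAmount {
			out <- buf
			return nil
		}
		return buf
	}

	func main() {
		out := make(chan []logLine, 10)
		var buf []logLine
		for i := 0; i < 120; i++ {
			buf = append(buf, logLine{fmt.Sprintf("line %d", i)})
			buf = maybeFlush(buf, out)
		}
		// 120 lines -> 2 full batches sent, 20 lines awaiting a final flush.
		fmt.Println("full batches sent:", len(out), "lines still buffered:", len(buf))
	}

Batching trades a little latency for far fewer channel operations per log file, which is exactly what the timing instrumentation in this patch is measuring. The next patch centralizes this buffering in a dedicated LogLineSender.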
From 0614976b98af5db6969df425755bbf17675d4aab Mon Sep 17 00:00:00 2001
From: Tedi Mitiku
Date: Thu, 8 Aug 2024 22:40:09 -0400
Subject: [PATCH 05/24] refactor to use log line sender

---
 .../per_week_stream_logs_strategy.go          | 231 ++----------------
 .../logline/logline_sender.go                 |  29 +++
 2 files changed, 52 insertions(+), 208 deletions(-)
 create mode 100644 engine/server/engine/centralized_logs/logline/logline_sender.go

diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
index d0798df8fa..99d2137c80 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
@@ -58,6 +58,7 @@ func (strategy *PerWeekStreamLogsStrategy) StreamLogs(
 	shouldReturnAllLogs bool,
 	numLogLines uint32,
 ) {
+	logLineSender := logline.NewLogLineSender(logsByKurtosisUserServiceUuidChan)
 	paths, err := strategy.getLogFilePaths(fs, strategy.logRetentionPeriodInWeeks, string(enclaveUuid), string(serviceUuid))
 	if err != nil {
 		streamErrChan <- stacktrace.Propagate(err, "An error occurred retrieving log file paths for service '%v' in enclave '%v'.", serviceUuid, enclaveUuid)
@@ -90,14 +91,12 @@ func (strategy *PerWeekStreamLogsStrategy) StreamLogs(
 	}()
 
 	if shouldReturnAllLogs {
-		startTime := time.Now()
-		if err := strategy.streamAllLogs(ctx, logsReader, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil {
+		if err := strategy.streamAllLogs(ctx, logsReader, logLineSender, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil {
 			streamErrChan <- stacktrace.Propagate(err, "An error occurred streaming all logs for service '%v' in enclave '%v'", serviceUuid, enclaveUuid)
 			return
 		}
-		logrus.Infof("TOTAL TIME IN STREAM ALL LOGS FUNCTION: %v", time.Now().Sub(startTime))
 	} else {
-		if err := strategy.streamTailLogs(ctx, logsReader, numLogLines, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil {
+		if err := strategy.streamTailLogs(ctx, logsReader, numLogLines, logLineSender, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil {
 			streamErrChan <- stacktrace.Propagate(err, "An error occurred streaming '%v' logs for service '%v' in enclave '%v'", numLogLines, serviceUuid, enclaveUuid)
 			return
 		}
@@ -105,7 +104,7 @@ func (strategy *PerWeekStreamLogsStrategy) StreamLogs(
 
 	if shouldFollowLogs {
 		latestLogFile := paths[len(paths)-1]
-		if err := strategy.followLogs(ctx, latestLogFile, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil {
+		if err := strategy.followLogs(ctx, latestLogFile, logLineSender, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil {
 			streamErrChan <- stacktrace.Propagate(err, "An error occurred creating following logs for service '%v' in enclave '%v'", serviceUuid, enclaveUuid)
 			return
 		}
@@ -183,40 +182,16 @@ func getLogsReader(filesystem volume_filesystem.VolumeFilesystem, logFilePaths [
 func (strategy *PerWeekStreamLogsStrategy) streamAllLogs(
 	ctx context.Context,
 	logsReader *bufio.Reader,
-	logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
+	logLineSender *logline.LogLineSender,
 	serviceUuid service.ServiceUUID,
 	conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) error {
-
-	var totalLogFileReadDuration time.Duration
-	var totalTimeToGetJsonStrings time.Duration
-	var totalTimeToSendJsonLogs time.Duration
-
-	var totalTimeToSendLogsGranular time.Duration
-	var totalTimeProcessLinesInSend time.Duration
-	var totalTimestampParsing time.Duration
-	var totalFilterCheck time.Duration
-	var totalRetentionCheck time.Duration
-
-	var ltm SendLogLineTimeMeasurements
-	var logLineBuffer []logline.LogLine
 	for {
 		select {
 		case <-ctx.Done():
 			logrus.Debugf("Context was canceled, stopping streaming service logs for service '%v'", serviceUuid)
-			logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, SendLogLineTimeMeasurements{
-				processDuration:              totalTimeProcessLinesInSend,
-				sendDuration:                 totalTimeToSendLogsGranular,
-				parseTimestampDuratoin:       totalTimestampParsing,
-				filterCheckDuration:          totalFilterCheck,
-				retentionPeriodCheckDuration: totalRetentionCheck,
-			})
 			return nil
 		default:
-			startTime := time.Now()
-
-			getJsonStartTime := time.Now()
 			jsonLogStr, err := getCompleteJsonLogString(logsReader)
-			totalTimeToGetJsonStrings += time.Now().Sub(getJsonStartTime)
 
 			if isValidJsonEnding(jsonLogStr) {
 				var logLine logline.LogLine
@@ -225,53 +200,18 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs(
 					return stacktrace.Propagate(err, "An error occurred converting the json log string '%v' into json.", jsonLogStr)
 				}
 
-				sendJsonLogLineStartTime := time.Now()
-				logLine, err, ltm = strategy.sendJsonLogLineWithTimes(jsonLog, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex)
+				logLine, err = strategy.processJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex)
 				if err != nil {
 					return err
 				}
-				logLineBuffer = append(logLineBuffer, logLine)
-				totalTimeToSendJsonLogs += time.Now().Sub(sendJsonLogLineStartTime)
-
-				//totalTimeToSendLogsGranular += ltm.sendDuration
-				totalTimeProcessLinesInSend += ltm.processDuration
-				totalTimestampParsing += ltm.parseTimestampDuratoin
-				totalFilterCheck += ltm.filterCheckDuration
-				totalRetentionCheck += ltm.retentionPeriodCheckDuration
-
-				endTime := time.Now()
-				totalLogFileReadDuration += endTime.Sub(startTime)
-			}
-
-			if len(logLineBuffer)%batchLogsAmount == 0 {
-				sendAcrossChannelStartTime := time.Now()
-				userServicesLogLinesMap := map[service.ServiceUUID][]logline.LogLine{
-					serviceUuid: logLineBuffer,
-				}
-				logsByKurtosisUserServiceUuidChan <- userServicesLogLinesMap
-				logLineBuffer = []logline.LogLine{}
-				totalTimeToSendLogsGranular += time.Now().Sub(sendAcrossChannelStartTime)
+				logLineSender.SendLogLine(serviceUuid, logLine)
 			}
 
 			if err != nil {
 				// if we've reached end of logs, return success, otherwise return the error
 				if errors.Is(err, io.EOF) {
-					logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, SendLogLineTimeMeasurements{
-						processDuration:              totalTimeProcessLinesInSend,
-						sendDuration:                 totalTimeToSendLogsGranular,
-						parseTimestampDuratoin:       totalTimestampParsing,
-						filterCheckDuration:          totalFilterCheck,
-						retentionPeriodCheckDuration: totalRetentionCheck,
-					})
 					return nil
 				} else {
-					logTimes(totalLogFileReadDuration, totalTimeToGetJsonStrings, totalTimeToSendJsonLogs, SendLogLineTimeMeasurements{
-						processDuration:              totalTimeProcessLinesInSend,
-						sendDuration:                 totalTimeToSendLogsGranular,
-						parseTimestampDuratoin:       totalTimestampParsing,
-						filterCheckDuration:          totalFilterCheck,
-						retentionPeriodCheckDuration: totalRetentionCheck,
-					})
 					return err
 				}
 			}
@@ -284,7 +224,7 @@ func (strategy *PerWeekStreamLogsStrategy) streamTailLogs(
 	ctx context.Context,
 	logsReader *bufio.Reader,
 	numLogLines uint32,
-	logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
+	logLineSender *logline.LogLineSender,
 	serviceUuid service.ServiceUUID,
 	conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) error {
 	tailLogLines := make([]string, 0, numLogLines)
@@ -322,9 +262,11 @@ func (strategy *PerWeekStreamLogsStrategy) streamTailLogs(
 		if err != nil {
 			return stacktrace.Propagate(err, "An error occurred converting the json log string '%v' into json.", jsonLogStr)
 		}
-		if err := strategy.sendJsonLogLine(jsonLog, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil {
+		logLine, err := strategy.processJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex)
+		if err != nil {
 			return err
 		}
+		logLineSender.SendLogLine(serviceUuid, logLine)
 	}
 
 	return nil
@@ -365,11 +307,9 @@ func isValidJsonEnding(line string) bool {
 	return endOfLine == volume_consts.EndOfJsonLine
 }
 
-func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLine(
+func (strategy *PerWeekStreamLogsStrategy) processJsonLogLine(
 	jsonLog JsonLog,
-	logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
-	serviceUuid service.ServiceUUID,
-	conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) error {
+	conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) (logline.LogLine, error) {
 	// each logLineStr is of the following structure: {"enclave_uuid": "...", "service_uuid":"...", "log": "...",.. "timestamp":"..."}
 	// eg. {"container_type":"api-container", "container_id":"8f8558ba", "container_name":"/kurtosis-api--ffd",
{"container_type":"api-container", "container_id":"8f8558ba", "container_name":"/kurtosis-api--ffd", // "log":"hi","timestamp":"2023-08-14T14:57:49Z"} @@ -377,150 +317,35 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLine( // Then extract the actual log message using the vectors log field logMsgStr, found := jsonLog[volume_consts.LogLabel] if !found { - return stacktrace.NewError("An error retrieving the log field '%v' from json log: %v\n", volume_consts.LogLabel, jsonLog) + return logline.LogLine{}, stacktrace.NewError("An error retrieving the log field '%v' from json log: %v\n", volume_consts.LogLabel, jsonLog) } // Extract the timestamp using vectors timestamp field logTimestamp, err := parseTimestampFromJsonLogLine(jsonLog) if err != nil { - return stacktrace.Propagate(err, "An error occurred parsing timestamp from json log line.") + return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred parsing timestamp from json log line.") } logLine := logline.NewLogLine(logMsgStr, *logTimestamp) // Then filter by checking if the log message is valid based on requested filters validLogLine, err := logLine.IsValidLogLineBaseOnFilters(conjunctiveLogLinesFiltersWithRegex) if err != nil { - return stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex) + return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex) } if !validLogLine { - return nil + return logline.LogLine{}, nil } // ensure this log line is within the retention period if it has a timestamp withinRetentionPeriod, err := strategy.isWithinRetentionPeriod(logLine) if err != nil { - return stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex) + return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex) } if !withinRetentionPeriod { - return nil + return logline.LogLine{}, nil } - // send the log line - logLines := []logline.LogLine{*logLine} - userServicesLogLinesMap := map[service.ServiceUUID][]logline.LogLine{ - serviceUuid: logLines, - } - logsByKurtosisUserServiceUuidChan <- userServicesLogLinesMap - return nil -} - -type SendLogLineTimeMeasurements struct { - processDuration time.Duration - sendDuration time.Duration - parseTimestampDuratoin time.Duration - filterCheckDuration time.Duration - retentionPeriodCheckDuration time.Duration -} - -func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLineWithTimes( - jsonLog JsonLog, - logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine, - serviceUuid service.ServiceUUID, - conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) (logline.LogLine, error, SendLogLineTimeMeasurements) { - // each logLineStr is of the following structure: {"enclave_uuid": "...", "service_uuid":"...", "log": "...",.. "timestamp":"..."} - // eg. 
{"container_type":"api-container", "container_id":"8f8558ba", "container_name":"/kurtosis-api--ffd", - // "log":"hi","timestamp":"2023-08-14T14:57:49Z"} - var processDuration time.Duration - var sendDuration time.Duration - var parseTimestampDuration time.Duration - var filterCheckDuration time.Duration - var retentionPeriodCheckDuration time.Duration - - processStart := time.Now() - // Then extract the actual log message using the vectors log field - logMsgStr, found := jsonLog[volume_consts.LogLabel] - if !found { - return logline.LogLine{}, stacktrace.NewError("An error retrieving the log field '%v' from json log: %v\n", volume_consts.LogLabel, jsonLog), SendLogLineTimeMeasurements{ - processDuration: processDuration, - sendDuration: sendDuration, - parseTimestampDuratoin: parseTimestampDuration, - filterCheckDuration: filterCheckDuration, - retentionPeriodCheckDuration: retentionPeriodCheckDuration, - } - } - - timestampStart := time.Now() - // Extract the timestamp using vectors timestamp field - logTimestamp, err := parseTimestampFromJsonLogLine(jsonLog) - if err != nil { - return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred parsing timestamp from json log line."), SendLogLineTimeMeasurements{ - processDuration: processDuration, - sendDuration: sendDuration, - parseTimestampDuratoin: parseTimestampDuration, - filterCheckDuration: filterCheckDuration, - retentionPeriodCheckDuration: retentionPeriodCheckDuration, - } - } - logLine := logline.NewLogLine(logMsgStr, *logTimestamp) - parseTimestampDuration += time.Now().Sub(timestampStart) - - filterStart := time.Now() - // Then filter by checking if the log message is valid based on requested filters - validLogLine, err := logLine.IsValidLogLineBaseOnFilters(conjunctiveLogLinesFiltersWithRegex) - if err != nil { - return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex), SendLogLineTimeMeasurements{ - processDuration: processDuration, - sendDuration: sendDuration, - parseTimestampDuratoin: parseTimestampDuration, - filterCheckDuration: filterCheckDuration, - retentionPeriodCheckDuration: retentionPeriodCheckDuration, - } - } - if !validLogLine { - return logline.LogLine{}, nil, SendLogLineTimeMeasurements{ - processDuration: processDuration, - sendDuration: sendDuration, - parseTimestampDuratoin: parseTimestampDuration, - filterCheckDuration: filterCheckDuration, - retentionPeriodCheckDuration: retentionPeriodCheckDuration, - } - } - filterCheckDuration += time.Now().Sub(filterStart) - - retentionCheckStart := time.Now() - // ensure this log line is within the retention period if it has a timestamp - withinRetentionPeriod, err := strategy.isWithinRetentionPeriod(logLine) - if err != nil { - return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred determining whether log line '%+v' is within the retention period.", logLine), SendLogLineTimeMeasurements{} - } - if !withinRetentionPeriod { - return logline.LogLine{}, nil, SendLogLineTimeMeasurements{ - processDuration: processDuration, - sendDuration: sendDuration, - parseTimestampDuratoin: parseTimestampDuration, - filterCheckDuration: filterCheckDuration, - retentionPeriodCheckDuration: retentionPeriodCheckDuration, - } - } - retentionPeriodCheckDuration += time.Now().Sub(retentionCheckStart) - - // send the log line - //logLines := []logline.LogLine{*logLine} - //userServicesLogLinesMap := map[service.ServiceUUID][]logline.LogLine{ - // serviceUuid: 
logLines,
-	//}
-	processDuration += time.Now().Sub(processStart)
-
-	sendStart := time.Now()
-	//logsByKurtosisUserServiceUuidChan <- userServicesLogLinesMap
-	sendDuration += time.Now().Sub(sendStart)
-	return *logLine, nil, SendLogLineTimeMeasurements{
-		processDuration:              processDuration,
-		sendDuration:                 sendDuration,
-		parseTimestampDuratoin:       parseTimestampDuration,
-		filterCheckDuration:          filterCheckDuration,
-		retentionPeriodCheckDuration: retentionPeriodCheckDuration,
-	}
+	return *logLine, nil
 }
 
 // Returns true if [logLine] has no timestamp
@@ -534,7 +359,7 @@ func (strategy *PerWeekStreamLogsStrategy) isWithinRetentionPeriod(logLine *logl
 func (strategy *PerWeekStreamLogsStrategy) followLogs(
 	ctx context.Context,
 	filepath string,
-	logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
+	logLineSender *logline.LogLineSender,
 	serviceUuid service.ServiceUUID,
 	conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex,
 ) error {
@@ -575,10 +400,11 @@ func (strategy *PerWeekStreamLogsStrategy) followLogs(
 				// if tail package fails to parse a valid new line, fail fast
 				return stacktrace.NewError("hpcloud/tail returned the following line: '%v' that was not valid json.\nThis is potentially a bug in tailing package.", logLine.Text)
 			}
-			err = strategy.sendJsonLogLine(jsonLog, logsByKurtosisUserServiceUuidChan, serviceUuid, conjunctiveLogLinesFiltersWithRegex)
+			processedLogLine, err := strategy.processJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex)
 			if err != nil {
 				return stacktrace.Propagate(err, "An error occurred sending json log line '%v'.", logLine.Text)
 			}
+			logLineSender.SendLogLine(serviceUuid, processedLogLine)
 		}
 	}
 }
@@ -603,14 +429,3 @@ func parseTimestampFromJsonLogLine(logLine JsonLog) (*time.Time, error) {
 	}
 	return &timestamp, nil
 }
-
-func logTimes(totalDuration, getLineDuration, totalSendLineDuration time.Duration, sendLogLineTM SendLogLineTimeMeasurements) {
-	logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO READ FILES: %v", totalDuration)
-	logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO GET JSON LINES: %v", getLineDuration)
-	logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO SEND JSON LINES: %v", totalSendLineDuration)
-	logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO SEND JSON LINES ACROSS CHANNEL: %v", sendLogLineTM.sendDuration)
-	logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO PROCESS JSON LINES BEFORE SENDING: %v", sendLogLineTM.processDuration)
-	logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO PARSE TIMESTAMPS: %v", sendLogLineTM.parseTimestampDuratoin)
-	logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO FILTER LINES BASED ON REGEXES: %v", sendLogLineTM.filterCheckDuration)
-	logrus.Infof("LOGS DB CLIENT [per_week_stream_logs_strategy] TOTAL TIME TO CHECK RETENTION PERIOD: %v", sendLogLineTM.retentionPeriodCheckDuration)
-}
diff --git a/engine/server/engine/centralized_logs/logline/logline_sender.go b/engine/server/engine/centralized_logs/logline/logline_sender.go
new file mode 100644
index 0000000000..f1e3354b8b
--- /dev/null
+++ b/engine/server/engine/centralized_logs/logline/logline_sender.go
@@ -0,0 +1,29 @@
+package logline
+
+import "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface/objects/service"
+
+const (
+	batchLogsAmount = 500
+)
+
+type LogLineSender struct {
+	logsChan chan 
map[service.ServiceUUID][]LogLine
+
+	logLineBuffer []LogLine
+}
+
+func NewLogLineSender(logsChan chan map[service.ServiceUUID][]LogLine) *LogLineSender {
+	return &LogLineSender{logsChan: logsChan}
+}
+
+func (sender *LogLineSender) SendLogLine(serviceUuid service.ServiceUUID, logLine LogLine) {
+	sender.logLineBuffer = append(sender.logLineBuffer, logLine)
+
+	if len(sender.logLineBuffer)%batchLogsAmount == 0 {
+		userServicesLogLinesMap := map[service.ServiceUUID][]LogLine{
+			serviceUuid: sender.logLineBuffer,
+		}
+		sender.logsChan <- userServicesLogLinesMap
+		sender.logLineBuffer = []LogLine{}
+	}
+}

From 75fd409cb2f365b5a6992eb1f48ffc7bb230d5ce Mon Sep 17 00:00:00 2001
From: Tedi Mitiku
Date: Thu, 8 Aug 2024 23:04:07 -0400
Subject: [PATCH 06/24] encapsulate buffered channel inside log line sender

---
 .../persistent_volume_logs_database_client.go    |  9 +++++----
 .../per_file_stream_logs_strategy.go             |  9 ++-------
 .../per_week_stream_logs_strategy.go             |  6 ++----
 .../stream_logs_strategy/stream_logs_strategy.go |  2 +-
 .../centralized_logs/logline/logline_sender.go   | 16 +++++++++++++---
 5 files changed, 23 insertions(+), 19 deletions(-)

diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go
index 1c44d1ef41..02e6544318 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go
@@ -64,7 +64,8 @@ func (client *persistentVolumeLogsDatabaseClient) StreamUserServiceLogs(
 	streamErrChan := make(chan error)
 
 	// this channel will return the user service log lines by service UUID
-	logsByKurtosisUserServiceUuidChan := make(chan map[service.ServiceUUID][]logline.LogLine, logLineBufferSize)
+	logLineSender := logline.NewLogLineSender()
+	logsByKurtosisUserServiceUuidChan := logLineSender.GetLogsChannel()
 
 	wgSenders := &sync.WaitGroup{}
 	for serviceUuid := range userServiceUuids {
@@ -72,7 +73,7 @@ func (client *persistentVolumeLogsDatabaseClient) StreamUserServiceLogs(
 		go client.streamServiceLogLines(
 			ctx,
 			wgSenders,
-			logsByKurtosisUserServiceUuidChan,
+			logLineSender,
 			streamErrChan,
 			enclaveUuid,
 			serviceUuid,
@@ -131,7 +132,7 @@ func (client *persistentVolumeLogsDatabaseClient) FilterExistingServiceUuids(
 func (client *persistentVolumeLogsDatabaseClient) streamServiceLogLines(
 	ctx context.Context,
 	wgSenders *sync.WaitGroup,
-	logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
+	logLineSender *logline.LogLineSender,
 	streamErrChan chan error,
 	enclaveUuid enclave.EnclaveUUID,
 	serviceUuid service.ServiceUUID,
@@ -144,7 +145,7 @@ func (client *persistentVolumeLogsDatabaseClient) streamServiceLogLines(
 	client.streamStrategy.StreamLogs(
 		ctx,
 		client.filesystem,
-		logsByKurtosisUserServiceUuidChan,
+		logLineSender,
 		streamErrChan,
 		enclaveUuid,
 		serviceUuid,
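For orientation between the diffs above and below, here is a minimal, illustrative Go sketch (not part of the patch) of how the pieces fit together after this change: the logs database client owns a LogLineSender, each stream strategy pushes processed lines into it, and the engine drains the batched channel. The wiring mirrors the diffs in this series; the loop bounds and the "test-service-uuid" value are hypothetical. Note that SendLogLine only flushes when the buffer length reaches an exact multiple of batchLogsAmount (500), so the trailing partial batch below stays buffered.

package main

import (
	"fmt"
	"time"

	"github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface/objects/service"
	"github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/logline"
)

func main() {
	// After this patch the sender owns its buffered output channel.
	sender := logline.NewLogLineSender()
	logsChan := sender.GetLogsChannel()

	serviceUuid := service.ServiceUUID("test-service-uuid") // hypothetical UUID

	// Producer side: in the real code a stream strategy calls SendLogLine for
	// every processed json log line; full batches are flushed onto logsChan.
	go func() {
		for i := 0; i < 1250; i++ {
			line := logline.NewLogLine(fmt.Sprintf("log line %d", i), time.Now())
			sender.SendLogLine(serviceUuid, *line)
		}
	}()

	// Consumer side: in the real code the engine drains this channel and
	// forwards each batch over the gRPC stream to the CLI.
	for i := 0; i < 2; i++ { // 1250 lines yield two full batches; 250 remain buffered
		logLinesByUuid := <-logsChan
		for uuid, lines := range logLinesByUuid {
			fmt.Printf("service %v: received a batch of %d log lines\n", uuid, len(lines))
		}
	}
}

Whether a trailing partial batch is flushed when a stream ends is not visible at this point in the series.
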
diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go
index 7fa08c1594..6c1f7147b4 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go
@@ -30,7 +30,7 @@ type JsonLog map[string]string
 func (strategy *PerFileStreamLogsStrategy) StreamLogs(
 	ctx context.Context,
 	fs volume_filesystem.VolumeFilesystem,
-	logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
+	logLineSender *logline.LogLineSender,
 	streamErrChan chan error,
 	enclaveUuid enclave.EnclaveUUID,
 	serviceUuid service.ServiceUUID,
@@ -122,12 +122,7 @@ func (strategy *PerFileStreamLogsStrategy) StreamLogs(
 				break
 			}
 
-			// send the log line
-			logLines := []logline.LogLine{*logLine}
-			userServicesLogLinesMap := map[service.ServiceUUID][]logline.LogLine{
-				serviceUuid: logLines,
-			}
-			logsByKurtosisUserServiceUuidChan <- userServicesLogLinesMap
+			logLineSender.SendLogLine(serviceUuid, *logLine)
 		}
 	}
 }
diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
index 99d2137c80..79fcd046fb 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go
@@ -25,8 +25,7 @@ import (
 )
 
 const (
-	oneWeek         = 7 * 24 * time.Hour
-	batchLogsAmount = 50
+	oneWeek = 7 * 24 * time.Hour
 )
 
 // PerWeekStreamLogsStrategy pulls logs from filesystem where there is a log file per year, per week, per enclave, per service
@@ -49,7 +48,7 @@ func NewPerWeekStreamLogsStrategy(time logs_clock.LogsClock, logRetentionPeriodI
 func (strategy *PerWeekStreamLogsStrategy) StreamLogs(
 	ctx context.Context,
 	fs volume_filesystem.VolumeFilesystem,
-	logsByKurtosisUserServiceUuidChan chan map[service.ServiceUUID][]logline.LogLine,
+	logLineSender *logline.LogLineSender,
 	streamErrChan chan error,
 	enclaveUuid enclave.EnclaveUUID,
 	serviceUuid service.ServiceUUID,
@@ -58,7 +57,6 @@ func (strategy *PerWeekStreamLogsStrategy) StreamLogs(
 	shouldReturnAllLogs bool,
 	numLogLines uint32,
 ) {
-	logLineSender := logline.NewLogLineSender(logsByKurtosisUserServiceUuidChan)
 	paths, err := strategy.getLogFilePaths(fs, strategy.logRetentionPeriodInWeeks, string(enclaveUuid), string(serviceUuid))
 	if err != nil {
 		streamErrChan <- stacktrace.Propagate(err, "An error occurred retrieving log file paths for service '%v' in enclave '%v'.", serviceUuid, enclaveUuid)
diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/stream_logs_strategy.go
index c8fa215b30..af00ed5646 100644
--- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/stream_logs_strategy.go
+++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/stream_logs_strategy.go
@@ -15,7 +15,7 @@ type StreamLogsStrategy interface {
 	StreamLogs(
 		ctx context.Context,
 		fs volume_filesystem.VolumeFilesystem,
-		logsByKurtosisUserServiceUuidChan chan 
map[service.ServiceUUID][]logline.LogLine, + logLineSender *logline.LogLineSender, streamErrChan chan error, enclaveUuid enclave.EnclaveUUID, serviceUuid service.ServiceUUID, diff --git a/engine/server/engine/centralized_logs/logline/logline_sender.go b/engine/server/engine/centralized_logs/logline/logline_sender.go index f1e3354b8b..871041dbc3 100644 --- a/engine/server/engine/centralized_logs/logline/logline_sender.go +++ b/engine/server/engine/centralized_logs/logline/logline_sender.go @@ -3,7 +3,8 @@ package logline import "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface/objects/service" const ( - batchLogsAmount = 500 + batchLogsAmount = 500 + logsChanBufferSize = 300 ) type LogLineSender struct { @@ -12,8 +13,11 @@ type LogLineSender struct { logLineBuffer []LogLine } -func NewLogLineSender(logsChan chan map[service.ServiceUUID][]LogLine) *LogLineSender { - return &LogLineSender{logsChan: logsChan} +func NewLogLineSender() *LogLineSender { + return &LogLineSender{ + logsChan: make(chan map[service.ServiceUUID][]LogLine, logsChanBufferSize), + logLineBuffer: []LogLine{}, + } } func (sender *LogLineSender) SendLogLine(serviceUuid service.ServiceUUID, logLine LogLine) { @@ -24,6 +28,12 @@ func (sender *LogLineSender) SendLogLine(serviceUuid service.ServiceUUID, logLin serviceUuid: sender.logLineBuffer, } sender.logsChan <- userServicesLogLinesMap + + // clear buffer after flushing it through the channel sender.logLineBuffer = []LogLine{} } } + +func (sender *LogLineSender) GetLogsChannel() chan map[service.ServiceUUID][]LogLine { + return sender.logsChan +} From 42f7a30723564ed53b6be837c1ae31d85cf8c865 Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Thu, 8 Aug 2024 23:41:10 -0400 Subject: [PATCH 07/24] refactor again and get tests to pass --- ...istent_volume_logs_database_client_test.go | 1554 +++++++++-------- .../per_week_stream_logs_strategy.go | 29 +- .../logline/logline_sender.go | 28 +- 3 files changed, 813 insertions(+), 798 deletions(-) diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go index 801697aafa..4bcb3378ff 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go @@ -1,7 +1,21 @@ package persistent_volume import ( + "context" + "fmt" + "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface" "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface/objects/enclave" + "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface/objects/service" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem" + 
"github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/logline" + "github.com/kurtosis-tech/stacktrace" + "github.com/stretchr/testify/require" + "strconv" + "strings" + "testing" "time" ) @@ -42,774 +56,772 @@ const ( defaultNumLogLines = 0 ) -// -//func TestStreamUserServiceLogs_WithFilters(t *testing.T) { -// expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ -// testUserService1Uuid: 2, -// testUserService2Uuid: 2, -// testUserService3Uuid: 2, -// } -// -// firstTextFilter := logline.NewDoesContainTextLogLineFilter(firstFilterText) -// secondTextFilter := logline.NewDoesNotContainTextLogLineFilter(secondFilterText) -// regexFilter := logline.NewDoesContainMatchRegexLogLineFilter(firstMatchRegexFilterStr) -// -// logLinesFilters := []logline.LogLineFilter{ -// *firstTextFilter, -// *secondTextFilter, -// *regexFilter, -// } -// -// expectedFirstLogLine := "Starting feature 'runs idempotently'" -// -// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// testUserService2Uuid: true, -// testUserService3Uuid: true, -// } -// -// underlyingFs := createFilledPerFileFilesystem() -// perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy() -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// logLinesFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perFileStreamStrategy, -// ) -// require.NoError(t, testEvaluationErr) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) -// } -//} -// -//func TestStreamUserServiceLogsPerWeek_WithFilters(t *testing.T) { -// expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ -// testUserService1Uuid: 2, -// testUserService2Uuid: 2, -// testUserService3Uuid: 2, -// } -// -// firstTextFilter := logline.NewDoesContainTextLogLineFilter(firstFilterText) -// secondTextFilter := logline.NewDoesNotContainTextLogLineFilter(secondFilterText) -// regexFilter := logline.NewDoesContainMatchRegexLogLineFilter(firstMatchRegexFilterStr) -// -// logLinesFilters := []logline.LogLineFilter{ -// *firstTextFilter, -// *secondTextFilter, -// *regexFilter, -// } -// -// expectedFirstLogLine := "Starting feature 'runs idempotently'" -// -// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// testUserService2Uuid: true, -// testUserService3Uuid: true, -// } -// -// underlyingFs := createFilledPerWeekFilesystem(startingWeek) -// mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) -// perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// logLinesFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perWeekStreamStrategy, -// ) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// 
require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) -// } -// -// require.NoError(t, testEvaluationErr) -//} -// -//func TestStreamUserServiceLogs_NoLogsFromPersistentVolume(t *testing.T) { -// expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ -// testUserService1Uuid: 0, -// testUserService2Uuid: 0, -// testUserService3Uuid: 0, -// } -// -// firstTextFilter := logline.NewDoesContainTextLogLineFilter(notFoundedFilterText) -// -// logLinesFilters := []logline.LogLineFilter{ -// *firstTextFilter, -// } -// -// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// testUserService2Uuid: true, -// testUserService3Uuid: true, -// } -// -// underlyingFs := createEmptyPerFileFilesystem() -// perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy() -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// logLinesFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perFileStreamStrategy, -// ) -// require.NoError(t, testEvaluationErr) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// } -//} -// -//func TestStreamUserServiceLogsPerWeek_NoLogsFromPersistentVolume(t *testing.T) { -// expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ -// testUserService1Uuid: 0, -// testUserService2Uuid: 0, -// testUserService3Uuid: 0, -// } -// -// firstTextFilter := logline.NewDoesContainTextLogLineFilter(notFoundedFilterText) -// -// logLinesFilters := []logline.LogLineFilter{ -// *firstTextFilter, -// } -// -// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// testUserService2Uuid: true, -// testUserService3Uuid: true, -// } -// -// underlyingFs := createEmptyPerWeekFilesystem(startingWeek) -// mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) -// perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// logLinesFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perWeekStreamStrategy, -// ) -// require.NoError(t, testEvaluationErr) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// } -//} -// -//func TestStreamUserServiceLogs_ThousandsOfLogLinesSuccessfulExecution(t *testing.T) { -// expectedAmountLogLines := 10_000 -// -// expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ -// testUserService1Uuid: expectedAmountLogLines, -// } -// -// var emptyFilters []logline.LogLineFilter -// -// expectedFirstLogLine := "Starting feature 'centralized logs'" -// -// var logLines []string -// -// for i := 0; i <= expectedAmountLogLines; i++ { -// logLines = append(logLines, logLine1) -// } -// -// logLinesStr := strings.Join(logLines, "\n") -// 
-// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// } -// -// underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() -// -// file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype) -// file1, err := underlyingFs.Create(file1PathStr) -// require.NoError(t, err) -// _, err = file1.WriteString(logLinesStr) -// require.NoError(t, err) -// -// perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy() -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// emptyFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perFileStreamStrategy, -// ) -// require.NoError(t, testEvaluationErr) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) -// } -//} -// -//func TestStreamUserServiceLogsPerWeek_ThousandsOfLogLinesSuccessfulExecution(t *testing.T) { -// expectedAmountLogLines := 10_000 -// -// expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ -// testUserService1Uuid: expectedAmountLogLines, -// } -// -// var emptyFilters []logline.LogLineFilter -// -// expectedFirstLogLine := "Starting feature 'centralized logs'" -// -// var logLines []string -// -// for i := 0; i <= expectedAmountLogLines; i++ { -// logLines = append(logLines, logLine1) -// } -// -// logLinesStr := strings.Join(logLines, "\n") -// -// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// } -// -// underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() -// // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format -// formattedWeekNum := fmt.Sprintf("%02d", startingWeek) -// file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype) -// file1, err := underlyingFs.Create(file1PathStr) -// require.NoError(t, err) -// _, err = file1.WriteString(logLinesStr) -// require.NoError(t, err) -// -// mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) -// perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// emptyFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perWeekStreamStrategy, -// ) -// require.NoError(t, testEvaluationErr) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) -// } -//} -// -//func TestStreamUserServiceLogs_EmptyLogLines(t *testing.T) { -// expectedAmountLogLines := 0 -// -// expectedServiceAmountLogLinesByServiceUuid := 
map[service.ServiceUUID]int{ -// testUserService1Uuid: expectedAmountLogLines, -// } -// -// var emptyFilters []logline.LogLineFilter -// -// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// } -// -// logLinesStr := "" -// -// underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() -// file1PathStr := fmt.Sprintf("%s%s/%s%s", volume_consts.LogsStorageDirpath, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype) -// file1, err := underlyingFs.Create(file1PathStr) -// require.NoError(t, err) -// _, err = file1.WriteString(logLinesStr) -// require.NoError(t, err) -// -// perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy() -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// emptyFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perFileStreamStrategy, -// ) -// require.NoError(t, testEvaluationErr) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// } -//} -// -//func TestStreamUserServiceLogsPerWeek_EmptyLogLines(t *testing.T) { -// expectedAmountLogLines := 0 -// -// expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ -// testUserService1Uuid: expectedAmountLogLines, -// } -// -// var emptyFilters []logline.LogLineFilter -// -// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// } -// -// logLinesStr := "" -// -// underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() -// formattedWeekNum := fmt.Sprintf("%02d", startingWeek) -// file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype) -// file1, err := underlyingFs.Create(file1PathStr) -// require.NoError(t, err) -// _, err = file1.WriteString(logLinesStr) -// require.NoError(t, err) -// -// mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) -// perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// emptyFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perWeekStreamStrategy, -// ) -// require.NoError(t, testEvaluationErr) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// } -//} -// -//func TestStreamUserServiceLogsPerWeek_WithLogsAcrossWeeks(t *testing.T) { -// expectedAmountLogLines := 8 -// -// expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ -// testUserService1Uuid: expectedAmountLogLines, -// } -// -// var logLinesFilters []logline.LogLineFilter -// -// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// } -// -// expectedFirstLogLine := "Starting feature 'centralized logs'" -// -// week4logLines := []string{ -// logLine5, -// 
logLine6, -// logLine7, -// logLine8} -// week3logLines := []string{ -// logLine1, -// logLine2, -// logLine3a, -// logLine3b, -// logLine4} -// -// underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() -// -// week3logLinesStr := strings.Join(week3logLines, "\n") + "\n" -// week4logLinesStr := strings.Join(week4logLines, "\n") -// -// formattedWeekFour := fmt.Sprintf("%02d", 4) -// week4filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekFour, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) -// week4, err := underlyingFs.Create(week4filepath) -// require.NoError(t, err) -// _, err = week4.WriteString(week4logLinesStr) -// require.NoError(t, err) -// -// formattedWeekThree := fmt.Sprintf("%02d", 3) -// week3filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekThree, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) -// week3, err := underlyingFs.Create(week3filepath) -// require.NoError(t, err) -// _, err = week3.WriteString(week3logLinesStr) -// require.NoError(t, err) -// -// mockTime := logs_clock.NewMockLogsClock(defaultYear, 4, defaultDay) -// perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// logLinesFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perWeekStreamStrategy, -// ) -// require.NoError(t, testEvaluationErr) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) -// } -// -//} -// -//func TestStreamUserServiceLogsPerWeek_WithLogLineAcrossWeeks(t *testing.T) { -// expectedAmountLogLines := 8 -// -// expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ -// testUserService1Uuid: expectedAmountLogLines, -// } -// -// var logLinesFilters []logline.LogLineFilter -// -// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// } -// -// expectedFirstLogLine := "Starting feature 'centralized logs'" -// -// week4logLines := []string{ -// logLine3b, -// logLine4, -// logLine5, -// logLine6, -// logLine7, -// logLine8} -// week3logLines := []string{ -// logLine1, -// logLine2, -// logLine3a} -// -// underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() -// -// week3logLinesStr := strings.Join(week3logLines, "\n") + "\n" -// week4logLinesStr := strings.Join(week4logLines, "\n") + "\n" -// -// formattedWeekFour := fmt.Sprintf("%02d", 4) -// week4filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekFour, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) -// week4, err := underlyingFs.Create(week4filepath) -// require.NoError(t, err) -// _, err = week4.WriteString(week4logLinesStr) -// require.NoError(t, err) -// -// formattedWeekThree := fmt.Sprintf("%02d", 3) -// week3filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), 
formattedWeekThree, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) -// week3, err := underlyingFs.Create(week3filepath) -// require.NoError(t, err) -// _, err = week3.WriteString(week3logLinesStr) -// require.NoError(t, err) -// -// mockTime := logs_clock.NewMockLogsClock(defaultYear, 4, defaultDay) -// perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// logLinesFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perWeekStreamStrategy, -// ) -// require.NoError(t, testEvaluationErr) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) -// } -//} -// -//func TestStreamUserServiceLogsPerWeekReturnsTimestampedLogLines(t *testing.T) { -// expectedAmountLogLines := 3 -// -// expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ -// testUserService1Uuid: expectedAmountLogLines, -// } -// -// var logLinesFilters []logline.LogLineFilter -// -// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// } -// -// timedLogLine1 := fmt.Sprintf("{\"log\":\"Starting feature 'centralized logs'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) -// timedLogLine2 := fmt.Sprintf("{\"log\":\"Starting feature 'runs idempotently'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) -// timedLogLine3 := fmt.Sprintf("{\"log\":\"The enclave was created\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) -// -// timestampedLogLines := []string{timedLogLine1, timedLogLine2, timedLogLine3} -// timestampedLogLinesStr := strings.Join(timestampedLogLines, "\n") + "\n" -// -// underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() -// -// formattedWeekNum := fmt.Sprintf("%02d", startingWeek) -// filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) -// file, err := underlyingFs.Create(filepath) -// require.NoError(t, err) -// _, err = file.WriteString(timestampedLogLinesStr) -// require.NoError(t, err) -// -// mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) -// perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) -// -// expectedTime, err := time.Parse(utcFormat, defaultUTCTimestampStr) -// require.NoError(t, err) -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// logLinesFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perWeekStreamStrategy, -// ) -// require.NoError(t, testEvaluationErr) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// for _, logLine := range serviceLogLines { -// require.Equal(t, expectedTime, 
logLine.GetTimestamp()) -// } -// } -//} -// -//func TestStreamUserServiceLogsPerFileReturnsTimestampedLogLines(t *testing.T) { -// t.Skip() -// expectedAmountLogLines := 3 -// -// expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ -// testUserService1Uuid: expectedAmountLogLines, -// } -// -// var logLinesFilters []logline.LogLineFilter -// -// userServiceUuids := map[service.ServiceUUID]bool{ -// testUserService1Uuid: true, -// } -// -// timedLogLine1 := fmt.Sprintf("{\"log\":\"Starting feature 'centralized logs'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) -// timedLogLine2 := fmt.Sprintf("{\"log\":\"Starting feature 'runs idempotently'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) -// timedLogLine3 := fmt.Sprintf("{\"log\":\"The enclave was created\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) -// -// timestampedLogLines := []string{timedLogLine1, timedLogLine2, timedLogLine3} -// timestampedLogLinesStr := strings.Join(timestampedLogLines, "\n") + "\n" -// -// underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() -// -// filepath := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) -// file, err := underlyingFs.Create(filepath) -// require.NoError(t, err) -// _, err = file.WriteString(timestampedLogLinesStr) -// require.NoError(t, err) -// -// perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy() -// -// expectedTime, err := time.Parse(utcFormat, defaultUTCTimestampStr) -// require.NoError(t, err) -// -// receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( -// t, -// logLinesFilters, -// userServiceUuids, -// expectedServiceAmountLogLinesByServiceUuid, -// doNotFollowLogs, -// underlyingFs, -// perFileStreamStrategy, -// ) -// require.NoError(t, testEvaluationErr) -// -// for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { -// expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] -// require.True(t, found) -// require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) -// require.Equal(t, expectedTime, serviceLogLines[0].GetTimestamp()) -// } -//} -// -//// ==================================================================================================== -//// -//// Private helper functions -//// -//// ==================================================================================================== -//func executeStreamCallAndGetReceivedServiceLogLines( -// t *testing.T, -// logLinesFilters []logline.LogLineFilter, -// userServiceUuids map[service.ServiceUUID]bool, -// expectedServiceAmountLogLinesByServiceUuid map[service.ServiceUUID]int, -// shouldFollowLogs bool, -// underlyingFs volume_filesystem.VolumeFilesystem, -// streamStrategy stream_logs_strategy.StreamLogsStrategy, -//) (map[service.ServiceUUID][]logline.LogLine, error) { -// ctx := context.Background() -// -// receivedServiceLogsByUuid := map[service.ServiceUUID][]logline.LogLine{} -// -// for serviceUuid := range expectedServiceAmountLogLinesByServiceUuid { -// receivedServiceLogsByUuid[serviceUuid] = []logline.LogLine{} -// } -// -// kurtosisBackend := backend_interface.NewMockKurtosisBackend(t) -// -// logsDatabaseClient := NewPersistentVolumeLogsDatabaseClient(kurtosisBackend, underlyingFs, streamStrategy) -// -// userServiceLogsByUuidChan, errChan, receivedCancelCtxFunc, err := logsDatabaseClient.StreamUserServiceLogs(ctx, enclaveUuid, 
userServiceUuids, logLinesFilters, shouldFollowLogs, defaultShouldReturnAllLogs, defaultNumLogLines) -// if err != nil { -// return nil, stacktrace.Propagate(err, "An error occurred getting user service logs for UUIDs '%+v' using log line filters '%v' in enclave '%v'", userServiceUuids, logLinesFilters, enclaveUuid) -// } -// defer func() { -// if receivedCancelCtxFunc != nil { -// receivedCancelCtxFunc() -// } -// }() -// -// require.NotNil(t, userServiceLogsByUuidChan, "Received a nil user service logs channel, but a non-nil value was expected") -// require.NotNil(t, errChan, "Received a nil error logs channel, but a non-nil value was expected") -// -// shouldReceiveStream := true -// for shouldReceiveStream { -// select { -// case <-time.Tick(testTimeOut): -// return nil, stacktrace.NewError("Receiving stream logs in the test has reached the '%v' time out", testTimeOut) -// case streamErr, isChanOpen := <-errChan: -// if !isChanOpen { -// shouldReceiveStream = false -// break -// } -// return nil, stacktrace.Propagate(streamErr, "Receiving streaming error.") -// case userServiceLogsByUuid, isChanOpen := <-userServiceLogsByUuidChan: -// if !isChanOpen { -// shouldReceiveStream = false -// break -// } -// -// for serviceUuid, serviceLogLines := range userServiceLogsByUuid { -// _, found := userServiceUuids[serviceUuid] -// require.True(t, found) -// -// currentServiceLogLines := receivedServiceLogsByUuid[serviceUuid] -// allServiceLogLines := append(currentServiceLogLines, serviceLogLines...) -// receivedServiceLogsByUuid[serviceUuid] = allServiceLogLines -// } -// -// for serviceUuid, expectedAmountLogLines := range expectedServiceAmountLogLinesByServiceUuid { -// if len(receivedServiceLogsByUuid[serviceUuid]) == expectedAmountLogLines { -// shouldReceiveStream = false -// } else { -// shouldReceiveStream = true -// break -// } -// } -// } -// } -// -// return receivedServiceLogsByUuid, nil -//} -// -//func createFilledPerFileFilesystem() volume_filesystem.VolumeFilesystem { -// logLines := []string{logLine1, logLine2, logLine3a, logLine3b, logLine4, logLine5, logLine6, logLine7, logLine8} -// -// logLinesStr := strings.Join(logLines, "\n") -// -// file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) -// file2PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) -// file3PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) -// -// mapFs := volume_filesystem.NewMockedVolumeFilesystem() -// -// file1, _ := mapFs.Create(file1PathStr) -// _, _ = file1.WriteString(logLinesStr) -// -// file2, _ := mapFs.Create(file2PathStr) -// _, _ = file2.WriteString(logLinesStr) -// -// file3, _ := mapFs.Create(file3PathStr) -// _, _ = file3.WriteString(logLinesStr) -// -// return mapFs -//} -// -//func createFilledPerWeekFilesystem(week int) volume_filesystem.VolumeFilesystem { -// logLines := []string{logLine1, logLine2, logLine3a, logLine3b, logLine4, logLine5, logLine6, logLine7, logLine8} -// -// logLinesStr := strings.Join(logLines, "\n") -// // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format -// formattedWeekNum := fmt.Sprintf("%02d", week) -// file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), 
formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) -// file2PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) -// file3PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) -// -// mapFs := volume_filesystem.NewMockedVolumeFilesystem() -// -// file1, _ := mapFs.Create(file1PathStr) -// _, _ = file1.WriteString(logLinesStr) -// -// file2, _ := mapFs.Create(file2PathStr) -// _, _ = file2.WriteString(logLinesStr) -// -// file3, _ := mapFs.Create(file3PathStr) -// _, _ = file3.WriteString(logLinesStr) -// -// return mapFs -//} -// -//func createEmptyPerFileFilesystem() volume_filesystem.VolumeFilesystem { -// file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) -// file2PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) -// file3PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) -// -// mapFs := volume_filesystem.NewMockedVolumeFilesystem() -// -// _, _ = mapFs.Create(file1PathStr) -// _, _ = mapFs.Create(file2PathStr) -// _, _ = mapFs.Create(file3PathStr) -// -// return mapFs -//} -// -//func createEmptyPerWeekFilesystem(week int) volume_filesystem.VolumeFilesystem { -// // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format -// formattedWeekNum := fmt.Sprintf("%02d", week) -// file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) -// file2PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) -// file3PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) -// -// mapFs := volume_filesystem.NewMockedVolumeFilesystem() -// -// _, _ = mapFs.Create(file1PathStr) -// _, _ = mapFs.Create(file2PathStr) -// _, _ = mapFs.Create(file3PathStr) -// -// return mapFs -//} +func TestStreamUserServiceLogs_WithFilters(t *testing.T) { + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: 2, + testUserService2Uuid: 2, + testUserService3Uuid: 2, + } + + firstTextFilter := logline.NewDoesContainTextLogLineFilter(firstFilterText) + secondTextFilter := logline.NewDoesNotContainTextLogLineFilter(secondFilterText) + regexFilter := logline.NewDoesContainMatchRegexLogLineFilter(firstMatchRegexFilterStr) + + logLinesFilters := []logline.LogLineFilter{ + *firstTextFilter, + *secondTextFilter, + *regexFilter, + } + + expectedFirstLogLine := "Starting feature 'runs idempotently'" + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + testUserService2Uuid: true, + testUserService3Uuid: true, + } + + underlyingFs := createFilledPerFileFilesystem() + 
perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy() + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perFileStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } +} + +func TestStreamUserServiceLogsPerWeek_WithFilters(t *testing.T) { + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: 2, + testUserService2Uuid: 2, + testUserService3Uuid: 2, + } + + firstTextFilter := logline.NewDoesContainTextLogLineFilter(firstFilterText) + secondTextFilter := logline.NewDoesNotContainTextLogLineFilter(secondFilterText) + regexFilter := logline.NewDoesContainMatchRegexLogLineFilter(firstMatchRegexFilterStr) + + logLinesFilters := []logline.LogLineFilter{ + *firstTextFilter, + *secondTextFilter, + *regexFilter, + } + + expectedFirstLogLine := "Starting feature 'runs idempotently'" + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + testUserService2Uuid: true, + testUserService3Uuid: true, + } + + underlyingFs := createFilledPerWeekFilesystem(startingWeek) + mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) + perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perWeekStreamStrategy, + ) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } + + require.NoError(t, testEvaluationErr) +} + +func TestStreamUserServiceLogs_NoLogsFromPersistentVolume(t *testing.T) { + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: 0, + testUserService2Uuid: 0, + testUserService3Uuid: 0, + } + + firstTextFilter := logline.NewDoesContainTextLogLineFilter(notFoundedFilterText) + + logLinesFilters := []logline.LogLineFilter{ + *firstTextFilter, + } + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + testUserService2Uuid: true, + testUserService3Uuid: true, + } + + underlyingFs := createEmptyPerFileFilesystem() + perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy() + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perFileStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := 
expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + } +} + +func TestStreamUserServiceLogsPerWeek_NoLogsFromPersistentVolume(t *testing.T) { + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: 0, + testUserService2Uuid: 0, + testUserService3Uuid: 0, + } + + firstTextFilter := logline.NewDoesContainTextLogLineFilter(notFoundedFilterText) + + logLinesFilters := []logline.LogLineFilter{ + *firstTextFilter, + } + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + testUserService2Uuid: true, + testUserService3Uuid: true, + } + + underlyingFs := createEmptyPerWeekFilesystem(startingWeek) + mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) + perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perWeekStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + } +} + +func TestStreamUserServiceLogs_ThousandsOfLogLinesSuccessfulExecution(t *testing.T) { + expectedAmountLogLines := 10_000 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var emptyFilters []logline.LogLineFilter + + expectedFirstLogLine := "Starting feature 'centralized logs'" + + var logLines []string + + for i := 0; i <= expectedAmountLogLines; i++ { + logLines = append(logLines, logLine1) + } + + logLinesStr := strings.Join(logLines, "\n") + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + + file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype) + file1, err := underlyingFs.Create(file1PathStr) + require.NoError(t, err) + _, err = file1.WriteString(logLinesStr) + require.NoError(t, err) + + perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy() + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + emptyFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perFileStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } +} + +func TestStreamUserServiceLogsPerWeek_ThousandsOfLogLinesSuccessfulExecution(t *testing.T) { + expectedAmountLogLines := 10_000 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var emptyFilters []logline.LogLineFilter + + 
expectedFirstLogLine := "Starting feature 'centralized logs'" + + var logLines []string + + for i := 0; i <= expectedAmountLogLines; i++ { + logLines = append(logLines, logLine1) + } + + logLinesStr := strings.Join(logLines, "\n") + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format + formattedWeekNum := fmt.Sprintf("%02d", startingWeek) + file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype) + file1, err := underlyingFs.Create(file1PathStr) + require.NoError(t, err) + _, err = file1.WriteString(logLinesStr) + require.NoError(t, err) + + mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) + perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + emptyFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perWeekStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } +} + +func TestStreamUserServiceLogs_EmptyLogLines(t *testing.T) { + expectedAmountLogLines := 0 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var emptyFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + logLinesStr := "" + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + file1PathStr := fmt.Sprintf("%s%s/%s%s", volume_consts.LogsStorageDirpath, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype) + file1, err := underlyingFs.Create(file1PathStr) + require.NoError(t, err) + _, err = file1.WriteString(logLinesStr) + require.NoError(t, err) + + perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy() + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + emptyFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perFileStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + } +} + +func TestStreamUserServiceLogsPerWeek_EmptyLogLines(t *testing.T) { + expectedAmountLogLines := 0 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var emptyFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + logLinesStr := "" + + underlyingFs := 
volume_filesystem.NewMockedVolumeFilesystem() + formattedWeekNum := fmt.Sprintf("%02d", startingWeek) + file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype) + file1, err := underlyingFs.Create(file1PathStr) + require.NoError(t, err) + _, err = file1.WriteString(logLinesStr) + require.NoError(t, err) + + mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) + perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + emptyFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perWeekStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + } +} + +func TestStreamUserServiceLogsPerWeek_WithLogsAcrossWeeks(t *testing.T) { + expectedAmountLogLines := 8 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var logLinesFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + expectedFirstLogLine := "Starting feature 'centralized logs'" + + week4logLines := []string{ + logLine5, + logLine6, + logLine7, + logLine8} + week3logLines := []string{ + logLine1, + logLine2, + logLine3a, + logLine3b, + logLine4} + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + + week3logLinesStr := strings.Join(week3logLines, "\n") + "\n" + week4logLinesStr := strings.Join(week4logLines, "\n") + + formattedWeekFour := fmt.Sprintf("%02d", 4) + week4filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekFour, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + week4, err := underlyingFs.Create(week4filepath) + require.NoError(t, err) + _, err = week4.WriteString(week4logLinesStr) + require.NoError(t, err) + + formattedWeekThree := fmt.Sprintf("%02d", 3) + week3filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekThree, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + week3, err := underlyingFs.Create(week3filepath) + require.NoError(t, err) + _, err = week3.WriteString(week3logLinesStr) + require.NoError(t, err) + + mockTime := logs_clock.NewMockLogsClock(defaultYear, 4, defaultDay) + perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perWeekStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, 
expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } + +} + +func TestStreamUserServiceLogsPerWeek_WithLogLineAcrossWeeks(t *testing.T) { + expectedAmountLogLines := 8 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var logLinesFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + expectedFirstLogLine := "Starting feature 'centralized logs'" + + week4logLines := []string{ + logLine3b, + logLine4, + logLine5, + logLine6, + logLine7, + logLine8} + week3logLines := []string{ + logLine1, + logLine2, + logLine3a} + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + + week3logLinesStr := strings.Join(week3logLines, "\n") + "\n" + week4logLinesStr := strings.Join(week4logLines, "\n") + "\n" + + formattedWeekFour := fmt.Sprintf("%02d", 4) + week4filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekFour, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + week4, err := underlyingFs.Create(week4filepath) + require.NoError(t, err) + _, err = week4.WriteString(week4logLinesStr) + require.NoError(t, err) + + formattedWeekThree := fmt.Sprintf("%02d", 3) + week3filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekThree, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + week3, err := underlyingFs.Create(week3filepath) + require.NoError(t, err) + _, err = week3.WriteString(week3logLinesStr) + require.NoError(t, err) + + mockTime := logs_clock.NewMockLogsClock(defaultYear, 4, defaultDay) + perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perWeekStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } +} + +func TestStreamUserServiceLogsPerWeekReturnsTimestampedLogLines(t *testing.T) { + expectedAmountLogLines := 3 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var logLinesFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + timedLogLine1 := fmt.Sprintf("{\"log\":\"Starting feature 'centralized logs'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) + timedLogLine2 := fmt.Sprintf("{\"log\":\"Starting feature 'runs idempotently'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) + timedLogLine3 := fmt.Sprintf("{\"log\":\"The enclave was created\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) + + timestampedLogLines := []string{timedLogLine1, timedLogLine2, timedLogLine3} + timestampedLogLinesStr := strings.Join(timestampedLogLines, "\n") + "\n" + + underlyingFs := 
volume_filesystem.NewMockedVolumeFilesystem() + + formattedWeekNum := fmt.Sprintf("%02d", startingWeek) + filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + file, err := underlyingFs.Create(filepath) + require.NoError(t, err) + _, err = file.WriteString(timestampedLogLinesStr) + require.NoError(t, err) + + mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) + perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + + expectedTime, err := time.Parse(utcFormat, defaultUTCTimestampStr) + require.NoError(t, err) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perWeekStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + for _, logLine := range serviceLogLines { + require.Equal(t, expectedTime, logLine.GetTimestamp()) + } + } +} + +func TestStreamUserServiceLogsPerFileReturnsTimestampedLogLines(t *testing.T) { + expectedAmountLogLines := 3 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var logLinesFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + timedLogLine1 := fmt.Sprintf("{\"log\":\"Starting feature 'centralized logs'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) + timedLogLine2 := fmt.Sprintf("{\"log\":\"Starting feature 'runs idempotently'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) + timedLogLine3 := fmt.Sprintf("{\"log\":\"The enclave was created\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) + + timestampedLogLines := []string{timedLogLine1, timedLogLine2, timedLogLine3} + timestampedLogLinesStr := strings.Join(timestampedLogLines, "\n") + "\n" + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + + filepath := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + file, err := underlyingFs.Create(filepath) + require.NoError(t, err) + _, err = file.WriteString(timestampedLogLinesStr) + require.NoError(t, err) + + perFileStreamStrategy := stream_logs_strategy.NewPerFileStreamLogsStrategy() + + expectedTime, err := time.Parse(utcFormat, defaultUTCTimestampStr) + require.NoError(t, err) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perFileStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedTime, serviceLogLines[0].GetTimestamp()) + } +} + +// // 
==================================================================================================== +// // +// // Private helper functions +// +// ==================================================================================================== +func executeStreamCallAndGetReceivedServiceLogLines( + t *testing.T, + logLinesFilters []logline.LogLineFilter, + userServiceUuids map[service.ServiceUUID]bool, + expectedServiceAmountLogLinesByServiceUuid map[service.ServiceUUID]int, + shouldFollowLogs bool, + underlyingFs volume_filesystem.VolumeFilesystem, + streamStrategy stream_logs_strategy.StreamLogsStrategy, +) (map[service.ServiceUUID][]logline.LogLine, error) { + ctx := context.Background() + + receivedServiceLogsByUuid := map[service.ServiceUUID][]logline.LogLine{} + + for serviceUuid := range expectedServiceAmountLogLinesByServiceUuid { + receivedServiceLogsByUuid[serviceUuid] = []logline.LogLine{} + } + + kurtosisBackend := backend_interface.NewMockKurtosisBackend(t) + + logsDatabaseClient := NewPersistentVolumeLogsDatabaseClient(kurtosisBackend, underlyingFs, streamStrategy) + + userServiceLogsByUuidChan, errChan, receivedCancelCtxFunc, err := logsDatabaseClient.StreamUserServiceLogs(ctx, enclaveUuid, userServiceUuids, logLinesFilters, shouldFollowLogs, defaultShouldReturnAllLogs, defaultNumLogLines) + if err != nil { + return nil, stacktrace.Propagate(err, "An error occurred getting user service logs for UUIDs '%+v' using log line filters '%v' in enclave '%v'", userServiceUuids, logLinesFilters, enclaveUuid) + } + defer func() { + if receivedCancelCtxFunc != nil { + receivedCancelCtxFunc() + } + }() + + require.NotNil(t, userServiceLogsByUuidChan, "Received a nil user service logs channel, but a non-nil value was expected") + require.NotNil(t, errChan, "Received a nil error logs channel, but a non-nil value was expected") + + shouldReceiveStream := true + for shouldReceiveStream { + select { + case <-time.Tick(testTimeOut): + return nil, stacktrace.NewError("Receiving stream logs in the test has reached the '%v' time out", testTimeOut) + case streamErr, isChanOpen := <-errChan: + if !isChanOpen { + shouldReceiveStream = false + break + } + return nil, stacktrace.Propagate(streamErr, "Receiving streaming error.") + case userServiceLogsByUuid, isChanOpen := <-userServiceLogsByUuidChan: + if !isChanOpen { + shouldReceiveStream = false + break + } + + for serviceUuid, serviceLogLines := range userServiceLogsByUuid { + _, found := userServiceUuids[serviceUuid] + require.True(t, found) + + currentServiceLogLines := receivedServiceLogsByUuid[serviceUuid] + allServiceLogLines := append(currentServiceLogLines, serviceLogLines...) 
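// NOTE: the expected-count check below flips shouldReceiveStream to false once a service holds exactly its expected number of lines, but any service still short re-opens the loop for another receive; also, `case <-time.Tick(testTimeOut)` above builds a fresh ticker on every pass through the select, so the timeout bounds the gap between consecutive receives rather than the total test runtime.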
+ receivedServiceLogsByUuid[serviceUuid] = allServiceLogLines + } + + for serviceUuid, expectedAmountLogLines := range expectedServiceAmountLogLinesByServiceUuid { + if len(receivedServiceLogsByUuid[serviceUuid]) == expectedAmountLogLines { + shouldReceiveStream = false + } else { + shouldReceiveStream = true + break + } + } + } + } + + return receivedServiceLogsByUuid, nil +} + +func createFilledPerFileFilesystem() volume_filesystem.VolumeFilesystem { + logLines := []string{logLine1, logLine2, logLine3a, logLine3b, logLine4, logLine5, logLine6, logLine7, logLine8} + + logLinesStr := strings.Join(logLines, "\n") + + file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + file2PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) + file3PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) + + mapFs := volume_filesystem.NewMockedVolumeFilesystem() + + file1, _ := mapFs.Create(file1PathStr) + _, _ = file1.WriteString(logLinesStr) + + file2, _ := mapFs.Create(file2PathStr) + _, _ = file2.WriteString(logLinesStr) + + file3, _ := mapFs.Create(file3PathStr) + _, _ = file3.WriteString(logLinesStr) + + return mapFs +} + +func createFilledPerWeekFilesystem(week int) volume_filesystem.VolumeFilesystem { + logLines := []string{logLine1, logLine2, logLine3a, logLine3b, logLine4, logLine5, logLine6, logLine7, logLine8} + + logLinesStr := strings.Join(logLines, "\n") + // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format + formattedWeekNum := fmt.Sprintf("%02d", week) + file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + file2PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) + file3PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) + + mapFs := volume_filesystem.NewMockedVolumeFilesystem() + + file1, _ := mapFs.Create(file1PathStr) + _, _ = file1.WriteString(logLinesStr) + + file2, _ := mapFs.Create(file2PathStr) + _, _ = file2.WriteString(logLinesStr) + + file3, _ := mapFs.Create(file3PathStr) + _, _ = file3.WriteString(logLinesStr) + + return mapFs +} + +func createEmptyPerFileFilesystem() volume_filesystem.VolumeFilesystem { + file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + file2PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) + file3PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) + + mapFs := volume_filesystem.NewMockedVolumeFilesystem() + + _, _ = mapFs.Create(file1PathStr) + _, _ = mapFs.Create(file2PathStr) + _, _ = mapFs.Create(file3PathStr) + + return mapFs +} + +func createEmptyPerWeekFilesystem(week int) 
volume_filesystem.VolumeFilesystem { + // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format + formattedWeekNum := fmt.Sprintf("%02d", week) + file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + file2PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) + file3PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) + + mapFs := volume_filesystem.NewMockedVolumeFilesystem() + + _, _ = mapFs.Create(file1PathStr) + _, _ = mapFs.Create(file2PathStr) + _, _ = mapFs.Create(file3PathStr) + + return mapFs +} diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go index 79fcd046fb..753776cdfa 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go @@ -192,17 +192,15 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs( jsonLogStr, err := getCompleteJsonLogString(logsReader) if isValidJsonEnding(jsonLogStr) { - var logLine logline.LogLine jsonLog, err := convertStringToJson(jsonLogStr) if err != nil { return stacktrace.Propagate(err, "An error occurred converting the json log string '%v' into json.", jsonLogStr) } - logLine, err = strategy.processJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex) + err = strategy.sendJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex, logLineSender, serviceUuid) if err != nil { return err } - logLineSender.SendLogLine(serviceUuid, logLine) } if err != nil { @@ -260,11 +258,10 @@ func (strategy *PerWeekStreamLogsStrategy) streamTailLogs( if err != nil { return stacktrace.Propagate(err, "An error occurred converting the json log string '%v' into json.", jsonLogStr) } - logLine, err := strategy.processJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex) + err = strategy.sendJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex, logLineSender, serviceUuid) if err != nil { return err } - logLineSender.SendLogLine(serviceUuid, logLine) } return nil @@ -305,9 +302,7 @@ func isValidJsonEnding(line string) bool { return endOfLine == volume_consts.EndOfJsonLine } -func (strategy *PerWeekStreamLogsStrategy) processJsonLogLine( - jsonLog JsonLog, - conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex) (logline.LogLine, error) { +func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLine(jsonLog JsonLog, conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex, logLineSender *logline.LogLineSender, serviceUuid service.ServiceUUID) error { // each logLineStr is of the following structure: {"enclave_uuid": "...", "service_uuid":"...", "log": "...",.. "timestamp":"..."} // eg. 
{"container_type":"api-container", "container_id":"8f8558ba", "container_name":"/kurtosis-api--ffd", // "log":"hi","timestamp":"2023-08-14T14:57:49Z"} @@ -315,35 +310,36 @@ func (strategy *PerWeekStreamLogsStrategy) processJsonLogLine( // Then extract the actual log message using the vectors log field logMsgStr, found := jsonLog[volume_consts.LogLabel] if !found { - return logline.LogLine{}, stacktrace.NewError("An error retrieving the log field '%v' from json log: %v\n", volume_consts.LogLabel, jsonLog) + return stacktrace.NewError("An error retrieving the log field '%v' from json log: %v\n", volume_consts.LogLabel, jsonLog) } // Extract the timestamp using vectors timestamp field logTimestamp, err := parseTimestampFromJsonLogLine(jsonLog) if err != nil { - return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred parsing timestamp from json log line.") + return stacktrace.Propagate(err, "An error occurred parsing timestamp from json log line.") } logLine := logline.NewLogLine(logMsgStr, *logTimestamp) // Then filter by checking if the log message is valid based on requested filters validLogLine, err := logLine.IsValidLogLineBaseOnFilters(conjunctiveLogLinesFiltersWithRegex) if err != nil { - return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex) + return stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex) } if !validLogLine { - return logline.LogLine{}, nil + return nil } // ensure this log line is within the retention period if it has a timestamp withinRetentionPeriod, err := strategy.isWithinRetentionPeriod(logLine) if err != nil { - return logline.LogLine{}, stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex) + return stacktrace.Propagate(err, "An error occurred filtering log line '%+v' using filters '%+v'", logLine, conjunctiveLogLinesFiltersWithRegex) } if !withinRetentionPeriod { - return logline.LogLine{}, nil + return nil } - return *logLine, nil + logLineSender.SendLogLine(serviceUuid, *logLine) + return nil } // Returns true if [logLine] has no timestamp @@ -398,11 +394,10 @@ func (strategy *PerWeekStreamLogsStrategy) followLogs( // if tail package fails to parse a valid new line, fail fast return stacktrace.NewError("hpcloud/tail returned the following line: '%v' that was not valid json.\nThis is potentially a bug in tailing package.", logLine.Text) } - processedLogLine, err := strategy.processJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex) + err = strategy.sendJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex, logLineSender, serviceUuid) if err != nil { return stacktrace.Propagate(err, "An error occurred sending json log line '%v'.", logLine.Text) } - logLineSender.SendLogLine(serviceUuid, processedLogLine) } } } diff --git a/engine/server/engine/centralized_logs/logline/logline_sender.go b/engine/server/engine/centralized_logs/logline/logline_sender.go index 871041dbc3..d92375d1d5 100644 --- a/engine/server/engine/centralized_logs/logline/logline_sender.go +++ b/engine/server/engine/centralized_logs/logline/logline_sender.go @@ -1,36 +1,44 @@ package logline -import "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface/objects/service" +import ( + "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface/objects/service" 
+ "sync" +) const ( - batchLogsAmount = 500 - logsChanBufferSize = 300 + batchLogsAmount = 1 + logsChanBufferSize = 1 ) type LogLineSender struct { logsChan chan map[service.ServiceUUID][]LogLine - logLineBuffer []LogLine + logLineBuffer map[service.ServiceUUID][]LogLine + + sync.Mutex } func NewLogLineSender() *LogLineSender { return &LogLineSender{ - logsChan: make(chan map[service.ServiceUUID][]LogLine, logsChanBufferSize), - logLineBuffer: []LogLine{}, + logsChan: make(chan map[service.ServiceUUID][]LogLine), + logLineBuffer: map[service.ServiceUUID][]LogLine{}, } } func (sender *LogLineSender) SendLogLine(serviceUuid service.ServiceUUID, logLine LogLine) { - sender.logLineBuffer = append(sender.logLineBuffer, logLine) + sender.Mutex.Lock() + defer sender.Mutex.Unlock() + + sender.logLineBuffer[serviceUuid] = append(sender.logLineBuffer[serviceUuid], logLine) - if len(sender.logLineBuffer)%batchLogsAmount == 0 { + if len(sender.logLineBuffer[serviceUuid])%batchLogsAmount == 0 { userServicesLogLinesMap := map[service.ServiceUUID][]LogLine{ - serviceUuid: sender.logLineBuffer, + serviceUuid: sender.logLineBuffer[serviceUuid], } sender.logsChan <- userServicesLogLinesMap // clear buffer after flushing it through the channel - sender.logLineBuffer = []LogLine{} + sender.logLineBuffer[serviceUuid] = []LogLine{} } } From 5fe30ffde1de6e93adc6445af61e47d4097497b2 Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Fri, 9 Aug 2024 00:19:27 -0400 Subject: [PATCH 08/24] flush logs and close channel when empty --- .../persistent_volume_logs_database_client.go | 15 ++++++++++++- .../logline/logline_sender.go | 21 ++++++++++++++++--- 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go index 02e6544318..d709f9fb28 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go @@ -89,7 +89,11 @@ func (client *persistentVolumeLogsDatabaseClient) StreamUserServiceLogs( //wait for stream go routine to end wgSenders.Wait() - close(logsByKurtosisUserServiceUuidChan) + // send all buffered log lines + logLineSender.Flush() + + // wait until the channel has been fully read/empty before closing it + closeChannelWhenEmpty(logsByKurtosisUserServiceUuidChan) close(streamErrChan) //then cancel the context @@ -154,3 +158,12 @@ func (client *persistentVolumeLogsDatabaseClient) streamServiceLogLines( shouldReturnAllLogs, numLogLines) } + +func closeChannelWhenEmpty(logsChan chan map[service.ServiceUUID][]logline.LogLine) { + for { + if len(logsChan) == 0 { + close(logsChan) + return + } + } +} diff --git a/engine/server/engine/centralized_logs/logline/logline_sender.go b/engine/server/engine/centralized_logs/logline/logline_sender.go index d92375d1d5..4877226f0c 100644 --- a/engine/server/engine/centralized_logs/logline/logline_sender.go +++ b/engine/server/engine/centralized_logs/logline/logline_sender.go @@ -6,8 +6,8 @@ import ( ) const ( - batchLogsAmount = 1 - logsChanBufferSize = 1 + batchLogsAmount = 500 + logsChanBufferSize = 300 ) type LogLineSender struct { @@ -20,7 +20,7 @@ type LogLineSender struct { func NewLogLineSender() *LogLineSender { return &LogLineSender{ 
- logsChan: make(chan map[service.ServiceUUID][]LogLine), + logsChan: make(chan map[service.ServiceUUID][]LogLine, logsChanBufferSize), logLineBuffer: map[service.ServiceUUID][]LogLine{}, } } @@ -45,3 +45,18 @@ func (sender *LogLineSender) SendLogLine(serviceUuid service.ServiceUUID, logLin func (sender *LogLineSender) GetLogsChannel() chan map[service.ServiceUUID][]LogLine { return sender.logsChan } + +// sends all logs remaining in the buffers through the channel +// this should be called at the end of processing to send the remainder of logs +func (sender *LogLineSender) Flush() { + sender.Mutex.Lock() + defer sender.Mutex.Unlock() + + for uuid, logLines := range sender.logLineBuffer { + serviceUuid := uuid + userServiceLogLinesMap := map[service.ServiceUUID][]LogLine{ + serviceUuid: logLines, + } + sender.logsChan <- userServiceLogLinesMap + } +} From 929f4b236119554f6f0c23ebdc38fbf5d91bfa72 Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Fri, 9 Aug 2024 00:34:46 -0400 Subject: [PATCH 09/24] clean up --- cli/cli/commands/service/logs/logs.go | 7 ------- .../persistent_volume_logs_database_client.go | 3 +-- ...rsistent_volume_logs_database_client_test.go | 6 +++--- .../per_week_stream_logs_strategy.go | 9 +++------ .../server/engine_connect_server_service.go | 17 ----------------- 5 files changed, 7 insertions(+), 35 deletions(-) diff --git a/cli/cli/commands/service/logs/logs.go b/cli/cli/commands/service/logs/logs.go index 028039224c..550cb5df0b 100644 --- a/cli/cli/commands/service/logs/logs.go +++ b/cli/cli/commands/service/logs/logs.go @@ -27,7 +27,6 @@ import ( "os" "os/signal" "strconv" - "time" ) const ( @@ -263,16 +262,13 @@ func run( interruptChan := make(chan os.Signal, interruptChanBufferSize) signal.Notify(interruptChan, os.Interrupt) - var totalLogPrintDuration time.Duration for { select { case serviceLogsStreamContent, isChanOpen := <-serviceLogsStreamContentChan: if !isChanOpen { - logrus.Infof("CLI [logs.txt] TOTAL TIME TO PRINT LOGS: %v", totalLogPrintDuration) return nil } - startTime := time.Now() notFoundServiceUuids := serviceLogsStreamContent.GetNotFoundServiceUuids() for notFoundServiceUuid := range notFoundServiceUuids { @@ -291,11 +287,8 @@ func run( out.PrintOutLn(fmt.Sprintf("[%v] %v", colorPrinter(serviceIdentifier), serviceLog.GetContent())) } } - endTime := time.Now() - totalLogPrintDuration = endTime.Sub(startTime) case <-interruptChan: logrus.Debugf("Received signal interruption in service logs Kurtosis CLI command") - logrus.Infof("CLI [logs.go] TOTAL TIME TO PRINT LOGS: %v", totalLogPrintDuration) return nil } } diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go index d709f9fb28..6880e8db3e 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client.go @@ -13,8 +13,7 @@ import ( ) const ( - logLineBufferSize = 300 - oneSenderAdded = 1 + oneSenderAdded = 1 ) // persistentVolumeLogsDatabaseClient pulls logs from a Docker volume the engine is mounted to diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go 
b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go index 4bcb3378ff..82f14c00e9 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go @@ -670,9 +670,9 @@ func TestStreamUserServiceLogsPerFileReturnsTimestampedLogLines(t *testing.T) { } } -// // ==================================================================================================== -// // -// // Private helper functions +// ==================================================================================================== +// +// Private helper functions // // ==================================================================================================== func executeStreamCallAndGetReceivedServiceLogLines( diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go index 753776cdfa..fc6533c5a3 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go @@ -197,8 +197,7 @@ func (strategy *PerWeekStreamLogsStrategy) streamAllLogs( return stacktrace.Propagate(err, "An error occurred converting the json log string '%v' into json.", jsonLogStr) } - err = strategy.sendJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex, logLineSender, serviceUuid) - if err != nil { + if err = strategy.sendJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex, logLineSender, serviceUuid); err != nil { return err } } @@ -258,8 +257,7 @@ func (strategy *PerWeekStreamLogsStrategy) streamTailLogs( if err != nil { return stacktrace.Propagate(err, "An error occurred converting the json log string '%v' into json.", jsonLogStr) } - err = strategy.sendJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex, logLineSender, serviceUuid) - if err != nil { + if err = strategy.sendJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex, logLineSender, serviceUuid); err != nil { return err } } @@ -394,8 +392,7 @@ func (strategy *PerWeekStreamLogsStrategy) followLogs( // if tail package fails to parse a valid new line, fail fast return stacktrace.NewError("hpcloud/tail returned the following line: '%v' that was not valid json.\nThis is potentially a bug in tailing package.", logLine.Text) } - err = strategy.sendJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex, logLineSender, serviceUuid) - if err != nil { + if err = strategy.sendJsonLogLine(jsonLog, conjunctiveLogLinesFiltersWithRegex, logLineSender, serviceUuid); err != nil { return stacktrace.Propagate(err, "An error occurred sending json log line '%v'.", logLine.Text) } } diff --git a/engine/server/engine/server/engine_connect_server_service.go b/engine/server/engine/server/engine_connect_server_service.go index a3f3c84225..3fc908e916 100644 --- a/engine/server/engine/server/engine_connect_server_service.go +++ b/engine/server/engine/server/engine_connect_server_service.go @@ -347,8 +347,6 @@ func (service *EngineConnectServerService) GetServiceLogs(ctx 
context.Context, c } }() - var totalLogStreamDuration time.Duration - var counter int for { select { //stream case @@ -356,39 +354,24 @@ func (service *EngineConnectServerService) GetServiceLogs(ctx context.Context, c //If the channel is closed means that the logs database client won't continue sending streams if !isChanOpen { logrus.Debug("Exiting the stream loop after receiving a close signal from the service logs by service UUID channel") - logrus.Infof("ENGINE [engine_connect_server_service.go] TOTAL TIME TO STREAM LOGS IN ENGINE: %v", totalLogStreamDuration) return nil } - // print out num log lines every 100 lines times - //for serviceUUID, logs := range serviceLogsByServiceUuid { - // if counter%100 == 0 { - // logrus.Infof("NUM LOG LINES FOR SERVICE '%v' CHECK IN ENGINE CONNECT SERVICE: %v", serviceUUID, len(logs)) - // } - //} - - startTime := time.Now() getServiceLogsResponse := newLogsResponse(requestedServiceUuids, serviceLogsByServiceUuid, notFoundServiceUuids) if err := stream.Send(getServiceLogsResponse); err != nil { return stacktrace.Propagate(err, "An error occurred sending the stream logs for service logs response '%+v'", getServiceLogsResponse) } - counter += 1 - endTime := time.Now() - totalLogStreamDuration += endTime.Sub(startTime) //client cancel ctx case case <-contextWithCancel.Done(): logrus.Debug("The user service logs stream has done") - logrus.Infof("ENGINE [engine_connect_server_service.go] TOTAL TIME TO STREAM LOGS IN ENGINE: %v", totalLogStreamDuration) return nil //error from logs database case case err, isChanOpen := <-errChan: if isChanOpen { logrus.Debug("Exiting the stream because an error from the logs database client was received through the error chan.") - logrus.Infof("ENGINE [engine_connect_server_service.go] TOTAL TIME TO STREAM LOGS IN ENGINE: %v", totalLogStreamDuration) return stacktrace.Propagate(err, "An error occurred streaming user service logs.") } logrus.Debug("Exiting the stream loop after receiving a close signal from the error chan") - logrus.Infof("ENGINE [engine_connect_server_service.go] TOTAL TIME TO STREAM LOGS IN ENGINE: %v", totalLogStreamDuration) return nil } } From fdd8bf392f16e43ad4eb0be81f85f723be96f38e Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Fri, 9 Aug 2024 00:52:18 -0400 Subject: [PATCH 10/24] undo build script change --- cli/cli/scripts/build.sh | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cli/cli/scripts/build.sh b/cli/cli/scripts/build.sh index db8f5d7443..20f1c2893b 100755 --- a/cli/cli/scripts/build.sh +++ b/cli/cli/scripts/build.sh @@ -97,10 +97,9 @@ fi exit 1 fi # Executing goreleaser v1.26.2 without needing to install it -# if ! curl -sfL https://goreleaser.com/static/run | VERSION=v1.26.2 DISTRIBUTION=oss bash -s -- ${goreleaser_verb_and_flags}; then - if ! GORELEASER_CURRENT_TAG=$(cat $root_dirpath/version.txt) goreleaser ${goreleaser_verb_and_flags}; then + if !
curl -sfL https://goreleaser.com/static/run | VERSION=v1.26.2 DISTRIBUTION=oss bash -s -- ${goreleaser_verb_and_flags}; then + echo "Error: Couldn't build the CLI binary for the current OS/arch" >&2 + exit 1 fi ) From dc9d1d173c933feccd207eff2d12d419c2b67e72 Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Fri, 9 Aug 2024 00:55:54 -0400 Subject: [PATCH 11/24] name mutex --- .../engine/centralized_logs/logline/logline_sender.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/engine/server/engine/centralized_logs/logline/logline_sender.go b/engine/server/engine/centralized_logs/logline/logline_sender.go index 4877226f0c..3b086e8359 100644 --- a/engine/server/engine/centralized_logs/logline/logline_sender.go +++ b/engine/server/engine/centralized_logs/logline/logline_sender.go @@ -15,7 +15,7 @@ type LogLineSender struct { logLineBuffer map[service.ServiceUUID][]LogLine - sync.Mutex + mu sync.Mutex } func NewLogLineSender() *LogLineSender { @@ -26,8 +26,8 @@ func NewLogLineSender() *LogLineSender { } func (sender *LogLineSender) SendLogLine(serviceUuid service.ServiceUUID, logLine LogLine) { - sender.Mutex.Lock() - defer sender.Mutex.Unlock() + sender.mu.Lock() + defer sender.mu.Unlock() sender.logLineBuffer[serviceUuid] = append(sender.logLineBuffer[serviceUuid], logLine) @@ -49,8 +49,8 @@ func (sender *LogLineSender) GetLogsChannel() chan map[service.ServiceUUID][]Log // sends all logs remaining in the buffers through the channel // this should be called at the end of processing to send the remainder of logs func (sender *LogLineSender) Flush() { - sender.Mutex.Lock() - defer sender.Mutex.Unlock() + sender.mu.Lock() + defer sender.mu.Unlock() for uuid, logLines := range sender.logLineBuffer { serviceUuid := uuid From d2b9f86948d96fc0a83349a035c1b8dd4aacf32c Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Fri, 9 Aug 2024 00:58:50 -0400 Subject: [PATCH 12/24] lint --- engine/server/engine/centralized_logs/logline/logline_sender.go | 1 + 1 file changed, 1 insertion(+) diff --git a/engine/server/engine/centralized_logs/logline/logline_sender.go b/engine/server/engine/centralized_logs/logline/logline_sender.go index 3b086e8359..c2dd3fd240 100644 --- a/engine/server/engine/centralized_logs/logline/logline_sender.go +++ b/engine/server/engine/centralized_logs/logline/logline_sender.go @@ -22,6 +22,7 @@ func NewLogLineSender() *LogLineSender { return &LogLineSender{ logsChan: make(chan map[service.ServiceUUID][]LogLine, logsChanBufferSize), logLineBuffer: map[service.ServiceUUID][]LogLine{}, + mu: sync.Mutex{}, } } From 813c98b6b5f1c3306992e02b257c325709b192ea Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Fri, 9 Aug 2024 07:47:18 -0400 Subject: [PATCH 13/24] increase seconds to wait for logs --- .../golang/testsuite/persisted_logs_test/persisted_logs_test.go | 2 +- .../golang/testsuite/stream_logs_test/stream_logs_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal_testsuites/golang/testsuite/persisted_logs_test/persisted_logs_test.go b/internal_testsuites/golang/testsuite/persisted_logs_test/persisted_logs_test.go index a991e71154..b1d09b6936 100644 --- a/internal_testsuites/golang/testsuite/persisted_logs_test/persisted_logs_test.go +++ b/internal_testsuites/golang/testsuite/persisted_logs_test/persisted_logs_test.go @@ -39,7 +39,7 @@ const ( logLine3 = "Starting feature 'enclave pool with size 2'" logLine4 = "The data have being loaded" - secondsToWaitForLogs = 1 * time.Second + secondsToWaitForLogs = 2 * time.Second ) var ( diff 
--git a/internal_testsuites/golang/testsuite/stream_logs_test/stream_logs_test.go b/internal_testsuites/golang/testsuite/stream_logs_test/stream_logs_test.go index 257fc7ff36..2e39e2ac3a 100644 --- a/internal_testsuites/golang/testsuite/stream_logs_test/stream_logs_test.go +++ b/internal_testsuites/golang/testsuite/stream_logs_test/stream_logs_test.go @@ -32,7 +32,7 @@ const ( thirdLogLine = "running" lastLogLine = "successfully" - secondsToWaitForLogs = 1 * time.Second + secondsToWaitForLogs = 2 * time.Second ) var ( From 6349c617b2e10fdf9e7990b3a1e5ae467795a7c6 Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Sat, 10 Aug 2024 09:25:38 -0400 Subject: [PATCH 14/24] rename send log line --- .../stream_logs_strategy/per_file_stream_logs_strategy.go | 2 +- .../stream_logs_strategy/per_week_stream_logs_strategy.go | 2 +- engine/server/engine/centralized_logs/logline/logline_sender.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go index 6c1f7147b4..b322e8c214 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go @@ -122,7 +122,7 @@ func (strategy *PerFileStreamLogsStrategy) StreamLogs( break } - logLineSender.SendLogLine(serviceUuid, *logLine) + logLineSender.Send(serviceUuid, *logLine) } } } diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go index fc6533c5a3..5f7ce18b3d 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go @@ -336,7 +336,7 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLine(jsonLog JsonLog, conj return nil } - logLineSender.SendLogLine(serviceUuid, *logLine) + logLineSender.Send(serviceUuid, *logLine) return nil } diff --git a/engine/server/engine/centralized_logs/logline/logline_sender.go b/engine/server/engine/centralized_logs/logline/logline_sender.go index c2dd3fd240..6aa4c6ad0e 100644 --- a/engine/server/engine/centralized_logs/logline/logline_sender.go +++ b/engine/server/engine/centralized_logs/logline/logline_sender.go @@ -26,7 +26,7 @@ func NewLogLineSender() *LogLineSender { } } -func (sender *LogLineSender) SendLogLine(serviceUuid service.ServiceUUID, logLine LogLine) { +func (sender *LogLineSender) Send(serviceUuid service.ServiceUUID, logLine LogLine) { sender.mu.Lock() defer sender.mu.Unlock() From 9510e222513cb798e5cf17a0dd96b6297e1de409 Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Sat, 10 Aug 2024 09:40:58 -0400 Subject: [PATCH 15/24] move log line before function --- .../stream_logs_strategy/per_week_stream_logs_strategy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git
a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go index 5f7ce18b3d..86fd788bb9 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go @@ -102,11 +102,11 @@ func (strategy *PerWeekStreamLogsStrategy) StreamLogs( if shouldFollowLogs { latestLogFile := paths[len(paths)-1] + logrus.Debugf("Following logs...") if err := strategy.followLogs(ctx, latestLogFile, logLineSender, serviceUuid, conjunctiveLogLinesFiltersWithRegex); err != nil { streamErrChan <- stacktrace.Propagate(err, "An error occurred creating following logs for service '%v' in enclave '%v'", serviceUuid, enclaveUuid) return } - logrus.Debugf("Following logs...") } } From 3b73af1b85f099406fdaf2bfb635708ec5776f12 Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Sat, 10 Aug 2024 09:52:22 -0400 Subject: [PATCH 16/24] flush before follow --- .../stream_logs_strategy/per_week_stream_logs_strategy.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go index 86fd788bb9..1717721392 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go @@ -100,6 +100,8 @@ func (strategy *PerWeekStreamLogsStrategy) StreamLogs( } } + // need to flush before following logs + logLineSender.Flush() if shouldFollowLogs { latestLogFile := paths[len(paths)-1] logrus.Debugf("Following logs...") From 470c61f5cac4c635798015bf5ceeaa73c7a253f0 Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Sat, 10 Aug 2024 09:58:59 -0400 Subject: [PATCH 17/24] clear buffers after flushing --- engine/server/engine/centralized_logs/logline/logline_sender.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/engine/server/engine/centralized_logs/logline/logline_sender.go b/engine/server/engine/centralized_logs/logline/logline_sender.go index 6aa4c6ad0e..0e76510dac 100644 --- a/engine/server/engine/centralized_logs/logline/logline_sender.go +++ b/engine/server/engine/centralized_logs/logline/logline_sender.go @@ -59,5 +59,7 @@ func (sender *LogLineSender) Flush() { serviceUuid: logLines, } sender.logsChan <- userServiceLogLinesMap + + sender.logLineBuffer[serviceUuid] = []LogLine{} } } From 2c489e29b0924c5ba8b350b0f81c68b041e2ce4b Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Sat, 10 Aug 2024 09:59:10 -0400 Subject: [PATCH 18/24] revert times --- .../golang/testsuite/persisted_logs_test/persisted_logs_test.go | 2 +- .../golang/testsuite/stream_logs_test/stream_logs_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal_testsuites/golang/testsuite/persisted_logs_test/persisted_logs_test.go b/internal_testsuites/golang/testsuite/persisted_logs_test/persisted_logs_test.go index b1d09b6936..a991e71154 100644 ---
a/internal_testsuites/golang/testsuite/persisted_logs_test/persisted_logs_test.go +++ b/internal_testsuites/golang/testsuite/persisted_logs_test/persisted_logs_test.go @@ -39,7 +39,7 @@ const ( logLine3 = "Starting feature 'enclave pool with size 2'" logLine4 = "The data have being loaded" - secondsToWaitForLogs = 2 * time.Second + secondsToWaitForLogs = 1 * time.Second ) var ( diff --git a/internal_testsuites/golang/testsuite/stream_logs_test/stream_logs_test.go b/internal_testsuites/golang/testsuite/stream_logs_test/stream_logs_test.go index 2e39e2ac3a..257fc7ff36 100644 --- a/internal_testsuites/golang/testsuite/stream_logs_test/stream_logs_test.go +++ b/internal_testsuites/golang/testsuite/stream_logs_test/stream_logs_test.go @@ -32,7 +32,7 @@ const ( thirdLogLine = "running" lastLogLine = "successfully" - secondsToWaitForLogs = 2 * time.Second + secondsToWaitForLogs = 1 * time.Second ) var ( From 2c1f0bf60b74beb4b36dc5b3e909d51442fb1340 Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Sat, 10 Aug 2024 10:02:35 -0400 Subject: [PATCH 19/24] lint --- cli/cli/scripts/build.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cli/cli/scripts/build.sh b/cli/cli/scripts/build.sh index 20f1c2893b..0f8401c209 100755 --- a/cli/cli/scripts/build.sh +++ b/cli/cli/scripts/build.sh @@ -98,9 +98,9 @@ fi fi # Executing goreleaser v1.26.2 without needing to install it if ! curl -sfL https://goreleaser.com/static/run | VERSION=v1.26.2 DISTRIBUTION=oss bash -s -- ${goreleaser_verb_and_flags}; then - echo "Error: Couldn't build the CLI binary for the current OS/arch" >&2 - exit 1 - fi + echo "Error: Couldn't build the CLI binary for the current OS/arch" >&2 + exit 1 + fi ) # Final verification From 815eded157986afaf98f9828e4c99d8b8a9075ae Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Sat, 10 Aug 2024 10:37:30 -0400 Subject: [PATCH 20/24] turn off cypress tests --- .circleci/config.yml | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 554dfabbbc..116d50dbc8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1474,17 +1474,18 @@ workflows: all_architectures: true name: "Check if CLI builds for all os and arch pairs" <<: *filters_ignore_main - - - test_enclave_manager_web_ui: - name: "Test Basic Web UI Functionality in Docker" - context: - - docker-user - requires: - - build_cli - - build_api_container_server - - build_engine_server - - build_files_artifacts_expander - <<: *filters_ignore_main +# 08/10/2024, tedi: turning these CI tests off for now as they depend on the package indexer for catalog.kurtosis.com which was shut down with +# the Kurtosis Cloud wind down +# - test_enclave_manager_web_ui: +# name: "Test Basic Web UI Functionality in Docker" +# context: +# - docker-user +# requires: +# - build_cli +# - build_api_container_server +# - build_engine_server +# - build_files_artifacts_expander +# <<: *filters_ignore_main - test_basic_cli_functionality: name: "Test Basic CLI Functionality in Docker" From ccd49c3c6b5008dc72339ed43db1cbbd58e9f767 Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Sat, 10 Aug 2024 10:54:00 -0400 Subject: [PATCH 21/24] remove k cloud ref --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a2a01ce152..26857129dc 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ Because of this additional layer of abstraction, we are able to introduce severa How do I get going? 
=================== -To see Kurtosis in action, first install it using the instructions [here](https://docs.kurtosis.com/install) or visit [Kurtosis Cloud](https://cloud.kurtosis.com/) to provision a remote host. +To see Kurtosis in action, first install it using the instructions [here](https://docs.kurtosis.com/install). Then, run the [Redis voting app Kurtosis package](https://github.com/kurtosis-tech/awesome-kurtosis/tree/main/redis-voting-app): From 684cbd9ad66e0ac2a7a740238147c140486ceccf Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Sat, 10 Aug 2024 10:59:51 -0400 Subject: [PATCH 22/24] use latest docs checker --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 116d50dbc8..e2d7b6d798 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -145,7 +145,7 @@ version: 2.1 orbs: npm-publisher: uraway/npm-publisher@0.2.0 - kurtosis-docs-checker: kurtosis-tech/docs-checker@0.2.7 + kurtosis-docs-checker: kurtosis-tech/docs-checker@0.2.8 slack: circleci/slack@4.10.1 executors: From fff5ff0e34318a42f62e9850844bd383d089ee1b Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Sat, 10 Aug 2024 11:16:04 -0400 Subject: [PATCH 23/24] use latest docs checker again --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index e2d7b6d798..f5bf1dfeb8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -145,7 +145,7 @@ version: 2.1 orbs: npm-publisher: uraway/npm-publisher@0.2.0 - kurtosis-docs-checker: kurtosis-tech/docs-checker@0.2.8 + kurtosis-docs-checker: kurtosis-tech/docs-checker@0.2.9 slack: circleci/slack@4.10.1 executors: From 2e8cf4368d167e8c8bb90fef6c4aa05f60a876ef Mon Sep 17 00:00:00 2001 From: Tedi Mitiku Date: Mon, 12 Aug 2024 11:17:04 -0400 Subject: [PATCH 24/24] delete CI test --- .circleci/config.yml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f5bf1dfeb8..792840db5b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1474,18 +1474,6 @@ workflows: all_architectures: true name: "Check if CLI builds for all os and arch pairs" <<: *filters_ignore_main -# 08/10/2024, tedi: turning these CI tests off for now as they depend on the package indexer for catalog.kurtosis.com which was shut down with -# the Kurtosis Cloud wind down -# - test_enclave_manager_web_ui: -# name: "Test Basic Web UI Functionality in Docker" -# context: -# - docker-user -# requires: -# - build_cli -# - build_api_container_server -# - build_engine_server -# - build_files_artifacts_expander -# <<: *filters_ignore_main - test_basic_cli_functionality: name: "Test Basic CLI Functionality in Docker"
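The LogLineSender changes in the patches above converge on one pattern: Send buffers log lines per service UUID behind a mutex and pushes a batch through the channel every batchLogsAmount lines, and Flush drains whatever remains once producers finish (and before following logs), so short tails are not dropped. Below is a minimal, self-contained Go sketch of that pattern; the names LogLineSender, Send, Flush, batchLogsAmount, and logsChanBufferSize follow the diffs above, while the stand-in LogLine and ServiceUUID types and the main() driver are illustrative assumptions rather than engine code.

    package main

    import (
    	"fmt"
    	"sync"
    )

    // Simplified stand-ins for the engine's service.ServiceUUID and logline.LogLine types.
    type ServiceUUID string

    type LogLine struct{ content string }

    const (
    	batchLogsAmount    = 500
    	logsChanBufferSize = 300
    )

    // LogLineSender buffers log lines per service and emits a batch on logsChan each time
    // a service's buffer reaches batchLogsAmount lines; a mutex guards the buffer map
    // because multiple per-service streaming goroutines share one sender.
    type LogLineSender struct {
    	logsChan      chan map[ServiceUUID][]LogLine
    	logLineBuffer map[ServiceUUID][]LogLine
    	mu            sync.Mutex
    }

    func NewLogLineSender() *LogLineSender {
    	return &LogLineSender{
    		logsChan:      make(chan map[ServiceUUID][]LogLine, logsChanBufferSize),
    		logLineBuffer: map[ServiceUUID][]LogLine{},
    	}
    }

    func (sender *LogLineSender) Send(serviceUuid ServiceUUID, logLine LogLine) {
    	sender.mu.Lock()
    	defer sender.mu.Unlock()

    	sender.logLineBuffer[serviceUuid] = append(sender.logLineBuffer[serviceUuid], logLine)
    	if len(sender.logLineBuffer[serviceUuid])%batchLogsAmount == 0 {
    		sender.logsChan <- map[ServiceUUID][]LogLine{serviceUuid: sender.logLineBuffer[serviceUuid]}
    		// clear the buffer once the batch is on the channel
    		sender.logLineBuffer[serviceUuid] = []LogLine{}
    	}
    }

    // Flush pushes every non-empty buffer through the channel; calling it after producers
    // finish keeps tails shorter than batchLogsAmount from being dropped.
    func (sender *LogLineSender) Flush() {
    	sender.mu.Lock()
    	defer sender.mu.Unlock()

    	for serviceUuid, logLines := range sender.logLineBuffer {
    		if len(logLines) == 0 {
    			continue
    		}
    		sender.logsChan <- map[ServiceUUID][]LogLine{serviceUuid: logLines}
    		sender.logLineBuffer[serviceUuid] = []LogLine{}
    	}
    }

    func main() {
    	sender := NewLogLineSender()
    	done := make(chan struct{})

    	go func() { // consumer: drain batches until the channel closes
    		for batch := range sender.logsChan {
    			for uuid, lines := range batch {
    				fmt.Printf("service %s: batch of %d lines\n", uuid, len(lines))
    			}
    		}
    		close(done)
    	}()

    	for i := 0; i < 1203; i++ { // 1203 lines -> two 500-line batches, then a 203-line flush
    		sender.Send("svc-1", LogLine{content: "hello"})
    	}
    	sender.Flush()
    	close(sender.logsChan)
    	<-done
    }

One caveat worth noting: closeChannelWhenEmpty in patch 08 busy-waits on len(logsChan) == 0 before closing, which burns CPU and never returns if the consumer stops reading early; the close-after-Flush sequence sketched here (or a sync.WaitGroup on the consumer side) is one way to avoid both issues.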