Skip to content

Commit

Permalink
fix: removing the backup bucket env check to determine if backup is enabled (#2668)
Browse files Browse the repository at this point in the history
  • Loading branch information
BonapartePC authored Nov 8, 2022
1 parent 56f0f23 commit ea8cf4c
Show file tree
Hide file tree
Showing 5 changed files with 8 additions and 27 deletions.
2 changes: 1 addition & 1 deletion enterprise/replay/setup.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ func loadConfig() {
func initFileManager() (filemanager.FileManager, string, error) {
bucket := strings.TrimSpace(config.GetString("JOBS_REPLAY_BACKUP_BUCKET", ""))
if bucket == "" {
pkgLogger.Error("[[ Replay ]] JOBS_BACKUP_BUCKET is not set")
pkgLogger.Error("[[ Replay ]] JOBS_REPLAY_BACKUP_BUCKET is not set")
panic("Bucket is not configured.")
}

Expand Down
2 changes: 1 addition & 1 deletion jobsdb/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ type backupSettings struct {
}

// isBackupEnabled reports whether job backups are active: the master switch
// and this instance's backup flag must both be on.
// NOTE(review): diff view — the two return lines below are the before/after of
// this commit. The first (removed) version additionally required the
// JOBS_BACKUP_BUCKET env var to be non-empty; the second (added) version drops
// that bucket check, which is the purpose of this change.
func (b *backupSettings) isBackupEnabled() bool {
return masterBackupEnabled && b.instanceBackupEnabled && config.GetString("JOBS_BACKUP_BUCKET", "") != ""
return masterBackupEnabled && b.instanceBackupEnabled
}

func IsMasterBackupEnabled() bool {
Expand Down
18 changes: 0 additions & 18 deletions services/archiver/archiver.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,12 +29,6 @@ func loadConfig() {
config.RegisterIntConfigVariable(100, &backupRowsBatchSize, true, 1, "Archiver.backupRowsBatchSize")
}

// IsArchiverObjectStorageConfigured reports whether the archiver has a usable
// object-storage destination: both the storage provider and the backup bucket
// must be configured (non-empty) in the environment.
func IsArchiverObjectStorageConfigured() bool {
	storageProvider := config.GetString("JOBS_BACKUP_STORAGE_PROVIDER", "")
	backupBucket := config.GetString("JOBS_BACKUP_BUCKET", "")
	return len(storageProvider) > 0 && len(backupBucket) > 0
}

// ArchiveOldRecords archives records in the table with the name`tableName` and `tsColumn` provided is used as the timestamp column.
func ArchiveOldRecords(tableName, tsColumn string, archivalTimeInDays int, dbHandle *sql.DB) {
stmt := fmt.Sprintf(`SELECT count(*), COALESCE(MIN(id),0), COALESCE(MAX(id),0) FROM %s WHERE %s < NOW() -INTERVAL '%d DAY'`, tableName, tsColumn, archivalTimeInDays)
Expand All @@ -51,18 +45,6 @@ func ArchiveOldRecords(tableName, tsColumn string, archivalTimeInDays int, dbHan
return
}

// TODO: Should we skip deletion if object storage provider not configured?
if !IsArchiverObjectStorageConfigured() {
stmt = fmt.Sprintf(`DELETE FROM %s WHERE id >= %d and id <= %d`, tableName, minID, maxID)
_, err = dbHandle.Exec(stmt)
if err != nil {
pkgLogger.Errorf(`[Archiver]: Error in deleting %s records: %v`, tableName, err)
return
}
pkgLogger.Infof(`[Archiver]: Deleted %s records %d to %d. No object storage was configured for archival`, tableName, minID, maxID)
return
}

tmpDirPath, err := misc.CreateTMPDIR()
if err != nil {
pkgLogger.Errorf("[Archiver]: Failed to create tmp DIR")
Expand Down
10 changes: 5 additions & 5 deletions services/filemanager/filemanager.go
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ func GetProviderConfigFromEnv(ctx context.Context, provider string) map[string]i
switch provider {

case "S3":
providerConfig["bucketName"] = config.GetString("JOBS_BACKUP_BUCKET", "")
providerConfig["bucketName"] = config.GetString("JOBS_BACKUP_BUCKET", "rudder-saas")
providerConfig["prefix"] = config.GetString("JOBS_BACKUP_PREFIX", "")
providerConfig["accessKeyID"] = config.GetString("AWS_ACCESS_KEY_ID", "")
providerConfig["accessKey"] = config.GetString("AWS_SECRET_ACCESS_KEY", "")
Expand All @@ -109,29 +109,29 @@ func GetProviderConfigFromEnv(ctx context.Context, provider string) map[string]i
}

case "GCS":
providerConfig["bucketName"] = config.GetString("JOBS_BACKUP_BUCKET", "")
providerConfig["bucketName"] = config.GetString("JOBS_BACKUP_BUCKET", "rudder-saas")
providerConfig["prefix"] = config.GetString("JOBS_BACKUP_PREFIX", "")
credentials, err := os.ReadFile(config.GetString("GOOGLE_APPLICATION_CREDENTIALS", ""))
if err == nil {
providerConfig["credentials"] = string(credentials)
}

case "AZURE_BLOB":
providerConfig["containerName"] = config.GetString("JOBS_BACKUP_BUCKET", "")
providerConfig["containerName"] = config.GetString("JOBS_BACKUP_BUCKET", "rudder-saas")
providerConfig["prefix"] = config.GetString("JOBS_BACKUP_PREFIX", "")
providerConfig["accountName"] = config.GetString("AZURE_STORAGE_ACCOUNT", "")
providerConfig["accountKey"] = config.GetString("AZURE_STORAGE_ACCESS_KEY", "")

case "MINIO":
providerConfig["bucketName"] = config.GetString("JOBS_BACKUP_BUCKET", "")
providerConfig["bucketName"] = config.GetString("JOBS_BACKUP_BUCKET", "rudder-saas")
providerConfig["prefix"] = config.GetString("JOBS_BACKUP_PREFIX", "")
providerConfig["endPoint"] = config.GetString("MINIO_ENDPOINT", "localhost:9000")
providerConfig["accessKeyID"] = config.GetString("MINIO_ACCESS_KEY_ID", "minioadmin")
providerConfig["secretAccessKey"] = config.GetString("MINIO_SECRET_ACCESS_KEY", "minioadmin")
providerConfig["useSSL"] = config.GetBool("MINIO_SSL", false)

case "DIGITAL_OCEAN_SPACES":
providerConfig["bucketName"] = config.GetString("JOBS_BACKUP_BUCKET", "")
providerConfig["bucketName"] = config.GetString("JOBS_BACKUP_BUCKET", "rudder-saas")
providerConfig["prefix"] = config.GetString("JOBS_BACKUP_PREFIX", "")
providerConfig["endPoint"] = config.GetString("DO_SPACES_ENDPOINT", "")
providerConfig["accessKeyID"] = config.GetString("DO_SPACES_ACCESS_KEY_ID", "")
Expand Down
3 changes: 1 addition & 2 deletions warehouse/archiver.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ import (
"github.com/iancoleman/strcase"
"github.com/lib/pq"
"github.com/rudderlabs/rudder-server/config"
"github.com/rudderlabs/rudder-server/services/archiver"
"github.com/rudderlabs/rudder-server/services/archiver/tablearchiver"
"github.com/rudderlabs/rudder-server/services/filemanager"
"github.com/rudderlabs/rudder-server/utils/misc"
Expand Down Expand Up @@ -272,7 +271,7 @@ func archiveUploads(dbHandle *sql.DB) {

var storedStagingFilesLocation string
if len(stagingFileIDs) > 0 {
if archiver.IsArchiverObjectStorageConfigured() && !hasUsedRudderStorage {
if !hasUsedRudderStorage {
filterSQL := fmt.Sprintf(`id IN (%v)`, misc.IntArrayToString(stagingFileIDs, ","))
storedStagingFilesLocation, err = backupRecords(backupRecordsArgs{
tableName: warehouseutils.WarehouseStagingFilesTable,
Expand Down

0 comments on commit ea8cf4c

Please sign in to comment.