Skip to content

Commit

Permalink
Merge pull request #125 from kmala/storage
Browse files Browse the repository at this point in the history
feat(swift): OpenStack Swift support
  • Loading branch information
kmala authored Jul 11, 2016
2 parents f745e74 + bad5362 commit b66069b
Show file tree
Hide file tree
Showing 7 changed files with 234 additions and 109 deletions.
3 changes: 3 additions & 0 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@ branches:
sudo: required
services:
- docker
before_install:
- sudo apt-get -qq update
- sudo apt-get install -y python-swiftclient
env:
# HACK(bacongobbler): make travis tests work
- DEIS_REGISTRY=travis-ci/
Expand Down
9 changes: 7 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,12 @@ test-style:
test-unit:
@echo "Implement functional tests in _tests directory"

test-functional:
contrib/ci/test.sh ${IMAGE}
# Run the full functional suite: swift first, then minio.
test-functional: test-functional-swift test-functional-minio

# Exercise the postgres image against a minio (S3-compatible) object store.
test-functional-minio:
	contrib/ci/test-minio.sh ${IMAGE}

# Exercise the postgres image against an OpenStack Swift object store.
test-functional-swift:
	contrib/ci/test-swift.sh ${IMAGE}

# Declare all command-style targets phony so that a stray file with one of
# these names can never make the target appear "up to date" and skip the run.
.PHONY: all docker-build docker-push test test-functional test-functional-minio test-functional-swift
74 changes: 74 additions & 0 deletions contrib/ci/test-minio.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
#!/usr/bin/env bash

# CI functional test: postgres backed by a minio object store.
# Boots minio, then a postgres container (image passed as $1) configured to
# ship WAL-E base backups into it; verifies the backups appear, then destroys
# and reboots postgres to prove it recovers from those backups.
#
# Usage: test-minio.sh <postgres-docker-image>

# NOTE(review): this was `set -eof pipefail`, which bash parses as
# `set -e -f -o pipefail` -- the stray `-f` disabled globbing by accident.
set -eo pipefail

TEST_ROOT=$(dirname "${BASH_SOURCE[0]}")/
# shared helpers: puts-step, puts-error, kill-container, create-postgres-creds,
# start-postgres, check-postgres
source "${TEST_ROOT}/test.sh"

# make sure we are in this dir
CURRENT_DIR=$(cd "$(dirname "$0")"; pwd)

create-postgres-creds

puts-step "creating fake minio credentials"

# create fake AWS credentials for minio admin credentials
mkdir -p "$CURRENT_DIR/tmp/aws-admin"
# needs to be 20 characters long
echo "12345678901234567890" > "$CURRENT_DIR/tmp/aws-admin/access-key-id"
# needs to be 40 characters long
echo "1234567890123456789012345678901234567890" > "$CURRENT_DIR/tmp/aws-admin/access-secret-key"

# create fake AWS credentials for minio user credentials; postgres mounts this
# same directory as its object-store creds below
mkdir -p "$CURRENT_DIR/tmp/aws-user"
# needs to be 20 characters long
echo "12345678901234567890" > "$CURRENT_DIR/tmp/aws-user/accesskey"
echo "12345678901234567890" > "$CURRENT_DIR/tmp/aws-user/access-key-id"
# needs to be 40 characters long
echo "1234567890123456789012345678901234567890" > "$CURRENT_DIR/tmp/aws-user/secretkey"
echo "1234567890123456789012345678901234567890" > "$CURRENT_DIR/tmp/aws-user/access-secret-key"

puts-step "creating fake kubernetes service account token"

# create fake k8s serviceaccount token for minio to "discover" itself
mkdir -p "$CURRENT_DIR/tmp/k8s"
echo "token" > "$CURRENT_DIR/tmp/k8s/token"
echo "cert" > "$CURRENT_DIR/tmp/k8s/ca.crt"

# kill the minio container if this script is interrupted
trap 'kill-container $MINIO_JOB' INT TERM
# boot minio
MINIO_JOB=$(docker run -dv "$CURRENT_DIR/tmp/aws-admin":/var/run/secrets/deis/minio/admin -v "$CURRENT_DIR/tmp/aws-user":/var/run/secrets/deis/minio/user -v "$CURRENT_DIR/tmp/k8s":/var/run/secrets/kubernetes.io/serviceaccount quay.io/deisci/minio:canary boot server /home/minio/)

# boot postgres, linking the minio container and setting DEIS_MINIO_SERVICE_HOST and DEIS_MINIO_SERVICE_PORT
# (kept as an unquoted-on-use command string: start-postgres word-splits it)
PG_CMD="docker run -d --link $MINIO_JOB:minio -e BACKUP_FREQUENCY=1s -e DATABASE_STORAGE=minio -e DEIS_MINIO_SERVICE_HOST=minio -e DEIS_MINIO_SERVICE_PORT=9000 -v $CURRENT_DIR/tmp/creds:/var/run/secrets/deis/database/creds -v $CURRENT_DIR/tmp/aws-user:/var/run/secrets/deis/objectstore/creds $1"

# NOTE(review): a later `trap` for the same signals REPLACES the earlier one,
# so every container created so far must be listed here -- trapping only
# $PG_JOB (as before) silently leaked the minio container on interrupt.
trap 'kill-container $PG_JOB; kill-container $MINIO_JOB' INT TERM
start-postgres "$PG_CMD"

# display logs for debugging purposes
puts-step "displaying minio logs"
docker logs "$MINIO_JOB"

check-postgres "$PG_JOB"

# check if minio has the 5 backups
puts-step "checking if minio has 5 backups"
BACKUPS="$(docker exec "$MINIO_JOB" ls /home/minio/dbwal/basebackups_005/ | grep json)"
NUM_BACKUPS="$(docker exec "$MINIO_JOB" ls /home/minio/dbwal/basebackups_005/ | grep -c json)"
# NOTE (bacongobbler): the BACKUP_FREQUENCY is only 1 second, so we could technically be checking
# in the middle of a backup. Instead of failing, let's consider N+1 backups an acceptable case
if [[ ! "$NUM_BACKUPS" -eq "5" && ! "$NUM_BACKUPS" -eq "6" ]]; then
    puts-error "did not find 5 or 6 base backups. 5 is the default, but 6 may exist if a backup is currently in progress (found $NUM_BACKUPS)"
    puts-error "$BACKUPS"
    exit 1
fi

# kill off postgres, then reboot and see if it's running after recovering from backups
puts-step "shutting off postgres, then rebooting to test data recovery"
kill-container "$PG_JOB"

start-postgres "$PG_CMD"

check-postgres "$PG_JOB"
77 changes: 77 additions & 0 deletions contrib/ci/test-swift.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
#!/usr/bin/env bash

# CI functional test: postgres backed by an OpenStack Swift object store.
# Boots a single-node swift (deis/swift-onlyone), then a postgres container
# (image passed as $1) that ships WAL-E base backups into it; verifies the
# backups appear, then destroys and reboots postgres to prove recovery.
#
# Usage: test-swift.sh <postgres-docker-image>
# Requires the `swift` CLI (python-swiftclient) on the host.

# NOTE(review): this was `set -eof pipefail`, which bash parses as
# `set -e -f -o pipefail` -- the stray `-f` disabled globbing by accident.
set -eo pipefail

TEST_ROOT=$(dirname "${BASH_SOURCE[0]}")/
# shared helpers: puts-step, puts-error, kill-container, create-postgres-creds,
# start-postgres, check-postgres
source "${TEST_ROOT}/test.sh"

# make sure we are in this dir
CURRENT_DIR=$(cd "$(dirname "$0")"; pwd)

create-postgres-creds

puts-step "fetching openstack credentials"

# turn creds into something that we can use.
mkdir -p "$CURRENT_DIR/tmp/swift"

# guess which value to use for tenant:
TENANT=""

echo "test:tester" > "$CURRENT_DIR/tmp/swift/username"
echo "testing" > "$CURRENT_DIR/tmp/swift/password"
echo ${TENANT} > "$CURRENT_DIR/tmp/swift/tenant"
echo "http://swift:8080/auth/v1.0" > "$CURRENT_DIR/tmp/swift/authurl"
echo "1" > "$CURRENT_DIR/tmp/swift/authversion"
echo "deis-swift-test" > "$CURRENT_DIR/tmp/swift/database-container"

# kill the data container if this script is interrupted
trap 'kill-container $SWIFT_DATA' INT TERM
# create (not run) the data-volume container for swift.
# NOTE(review): the original used `docker run` without -d; busybox's default
# command exits immediately and prints nothing, so $SWIFT_DATA ended up empty
# and the cleanup trap ran `docker rm -f ""`. `docker create` prints the ID.
SWIFT_DATA=$(docker create -v /srv --name SWIFT_DATA busybox)

# NOTE(review): a later `trap` for the same signals REPLACES the earlier one,
# so every container created so far must be listed again.
trap 'kill-container $SWIFT_JOB; kill-container $SWIFT_DATA' INT TERM
# boot swift, exposing it on host port 12345 for the `swift` CLI checks below
SWIFT_JOB=$(docker run --name onlyone --hostname onlyone -d -p 12345:8080 --volumes-from SWIFT_DATA -t deis/swift-onlyone:git-8516d23)


# postgres container command
# (kept as an unquoted-on-use command string: start-postgres word-splits it)
PG_CMD="docker run -d --link $SWIFT_JOB:swift -e BACKUP_FREQUENCY=3s \
-e DATABASE_STORAGE=swift \
-v $CURRENT_DIR/tmp/creds:/var/run/secrets/deis/database/creds \
-v $CURRENT_DIR/tmp/swift:/var/run/secrets/deis/objectstore/creds \
$1"

# again: re-arm the trap with the full set of containers to clean up
trap 'kill-container $PG_JOB; kill-container $SWIFT_JOB; kill-container $SWIFT_DATA' INT TERM
start-postgres "$PG_CMD"

# display logs for debugging purposes
puts-step "displaying swift logs"
docker logs "$SWIFT_JOB"

check-postgres "$PG_JOB"

# check how many base backups swift holds (WAL-E retains 5 by default)
puts-step "checking if swift has 5 backups"

BACKUPS="$(swift -A http://127.0.0.1:12345/auth/v1.0 -U test:tester -K testing list deis-swift-test | grep basebackups_005 | grep json)"
NUM_BACKUPS="$(swift -A http://127.0.0.1:12345/auth/v1.0 -U test:tester -K testing list deis-swift-test | grep basebackups_005 | grep -c json)"
# NOTE: BACKUP_FREQUENCY is only 3 seconds, so we could technically be checking
# in the middle of a backup. Instead of failing, let's consider N+1 backups an acceptable case
if [[ ! "$NUM_BACKUPS" -eq "5" && ! "$NUM_BACKUPS" -eq "6" ]]; then
    puts-error "did not find 5 or 6 base backups. 5 is the default, but 6 may exist if a backup is currently in progress (found $NUM_BACKUPS)"
    puts-error "$BACKUPS"
    exit 1
fi

# kill off postgres, then reboot and see if it's running after recovering from backups
puts-step "shutting off postgres, then rebooting to test data recovery"
kill-container "$PG_JOB"

start-postgres "$PG_CMD"

check-postgres "$PG_JOB"

puts-step "tests PASSED!"
exit 0
108 changes: 24 additions & 84 deletions contrib/ci/test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -10,93 +10,33 @@ puts-error() {
echo "!!! $@"
}

kill-containers() {
puts-step "destroying containers"
docker rm -f "$MINIO_JOB" "$PG_JOB"
# Force-remove a single container (running or not) by name or ID.
#   $1 - container name/ID
kill-container() {
local target="$1"
puts-step "destroying container ${target}"
docker rm -f "${target}"
}

# make sure we are in this dir
CURRENT_DIR=$(cd $(dirname $0); pwd)
create-postgres-creds() {
puts-step "creating fake postgres credentials"

puts-step "creating fake postgres credentials"

# create fake postgres credentials
mkdir -p $CURRENT_DIR/tmp/creds
echo "testuser" > $CURRENT_DIR/tmp/creds/user
echo "icanttellyou" > $CURRENT_DIR/tmp/creds/password

puts-step "creating fake minio credentials"

# create fake AWS credentials for minio admin credentials
mkdir -p $CURRENT_DIR/tmp/aws-admin
# needs to be 20 characters long
echo "12345678901234567890" > $CURRENT_DIR/tmp/aws-admin/access-key-id
# needs to be 40 characters long
echo "1234567890123456789012345678901234567890" > $CURRENT_DIR/tmp/aws-admin/access-secret-key

# create fake AWS credentials for minio user credentials
mkdir -p $CURRENT_DIR/tmp/aws-user
# needs to be 20 characters long
echo "12345678901234567890" > $CURRENT_DIR/tmp/aws-user/accesskey
echo "12345678901234567890" > $CURRENT_DIR/tmp/aws-user/access-key-id
# needs to be 40 characters long
echo "1234567890123456789012345678901234567890" > $CURRENT_DIR/tmp/aws-user/secretkey
echo "1234567890123456789012345678901234567890" > $CURRENT_DIR/tmp/aws-user/access-secret-key

puts-step "creating fake kubernetes service account token"

# create fake k8s serviceaccount token for minio to "discover" itself
mkdir -p $CURRENT_DIR/tmp/k8s
echo "token" > $CURRENT_DIR/tmp/k8s/token
echo "cert" > $CURRENT_DIR/tmp/k8s/ca.crt

# boot minio
MINIO_JOB=$(docker run -dv $CURRENT_DIR/tmp/aws-admin:/var/run/secrets/deis/minio/admin -v $CURRENT_DIR/tmp/aws-user:/var/run/secrets/deis/minio/user -v $CURRENT_DIR/tmp/k8s:/var/run/secrets/kubernetes.io/serviceaccount quay.io/deisci/minio:canary boot server /home/minio/)

# boot postgres, linking the minio container and setting DEIS_MINIO_SERVICE_HOST and DEIS_MINIO_SERVICE_PORT
PG_JOB=$(docker run -d --link $MINIO_JOB:minio -e BACKUP_FREQUENCY=1s -e DATABASE_STORAGE=minio -e DEIS_MINIO_SERVICE_HOST=minio -e DEIS_MINIO_SERVICE_PORT=9000 -v $CURRENT_DIR/tmp/creds:/var/run/secrets/deis/database/creds -v $CURRENT_DIR/tmp/aws-user:/var/run/secrets/deis/objectstore/creds $1)

# kill containers when this script exits or errors out
trap kill-containers INT TERM

# wait for postgres to boot
puts-step "sleeping for 90s while postgres is booting..."
sleep 90s

# display logs for debugging purposes
puts-step "displaying minio logs"
docker logs $MINIO_JOB
puts-step "displaying postgres logs"
docker logs $PG_JOB

# check if postgres is running
puts-step "checking if postgres is running"
docker exec $PG_JOB is_running

# check if minio has the 5 backups
puts-step "checking if minio has 5 backups"
BACKUPS="$(docker exec $MINIO_JOB ls /home/minio/dbwal/basebackups_005/ | grep json)"
NUM_BACKUPS="$(docker exec $MINIO_JOB ls /home/minio/dbwal/basebackups_005/ | grep -c json)"
# NOTE (bacongobbler): the BACKUP_FREQUENCY is only 1 second, so we could technically be checking
# in the middle of a backup. Instead of failing, let's consider N+1 backups an acceptable case
if [[ ! "$NUM_BACKUPS" -eq "5" && ! "$NUM_BACKUPS" -eq "6" ]]; then
puts-error "did not find 5 or 6 base backups. 5 is the default, but 6 may exist if a backup is currently in progress (found $NUM_BACKUPS)"
puts-error "$BACKUPS"
exit 1
fi

# kill off postgres, then reboot and see if it's running after recovering from backups
puts-step "shutting off postgres, then rebooting to test data recovery"
docker rm -f $PG_JOB
PG_JOB=$(docker run -d --link $MINIO_JOB:minio -e BACKUP_FREQUENCY=1s -e DATABASE_STORAGE=minio -e DEIS_MINIO_SERVICE_HOST=minio -e DEIS_MINIO_SERVICE_PORT=9000 -v $CURRENT_DIR/tmp/creds:/var/run/secrets/deis/database/creds -v $CURRENT_DIR/tmp/aws-user:/var/run/secrets/deis/objectstore/creds $1)
# create fake postgres credentials
mkdir -p $CURRENT_DIR/tmp/creds
echo "testuser" > $CURRENT_DIR/tmp/creds/user
echo "icanttellyou" > $CURRENT_DIR/tmp/creds/password
}

# wait for postgres to boot
puts-step "sleeping for 90s while postgres is recovering from backup..."
sleep 90s
# Launch the postgres container and give it time to come up.
#   $1 - a full `docker run -d ...` command line, passed as ONE string; the
#        unquoted expansion below word-splits it on purpose, so do not quote it.
# Side effect: sets the global PG_JOB to the container ID printed by docker.
start-postgres() {
# intentionally unquoted: $1 is a whole command line that must word-split
PG_JOB=$($1)
# wait for postgres to boot
# NOTE(review): fixed 90s sleep assumes postgres boots within that window
puts-step "sleeping for 90s while postgres is booting..."
sleep 90s
}

puts-step "displaying postgres logs"
docker logs $PG_JOB
check-postgres() {
# display logs for debugging purposes
puts-step "displaying postgres logs"
docker logs $1

# check if postgres is running
puts-step "checking if postgres is running"
docker exec $PG_JOB is_running
# check if postgres is running
puts-step "checking if postgres is running"
docker exec $1 is_running
}
57 changes: 34 additions & 23 deletions rootfs/bin/create_bucket
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
#!/usr/bin/env python

import os
import sys

import boto3
import botocore
import json
import swiftclient
from botocore.utils import fix_s3_host
from botocore.client import Config
from oauth2client.service_account import ServiceAccountCredentials
Expand All @@ -30,7 +30,7 @@ if os.getenv('DATABASE_STORAGE') == "s3":
raise

if not exists:
conn.create_bucket(Bucket=bucket_name)
conn.create_bucket(Bucket=bucket_name)

elif os.getenv('DATABASE_STORAGE') == "gcs":
scopes = ['https://www.googleapis.com/auth/devstorage.full_control']
Expand All @@ -46,28 +46,39 @@ elif os.getenv('DATABASE_STORAGE') == "gcs":
except:
raise
if not exists:
client.create_bucket(bucket_name)
client.create_bucket(bucket_name)

elif os.getenv('DATABASE_STORAGE') == "azure":
block_blob_service = BlockBlobService(account_name=os.getenv('WABS_ACCOUNT_NAME'), account_key=os.getenv('WABS_ACCESS_KEY'))
#It doesn't throw an exception if the container exists by default(https://github.com/Azure/azure-storage-python/blob/master/azure/storage/blob/baseblobservice.py#L504).
block_blob_service.create_container(bucket_name)
block_blob_service = BlockBlobService(account_name=os.getenv('WABS_ACCOUNT_NAME'), account_key=os.getenv('WABS_ACCESS_KEY'))
#It doesn't throw an exception if the container exists by default(https://github.com/Azure/azure-storage-python/blob/master/azure/storage/blob/baseblobservice.py#L504).
block_blob_service.create_container(bucket_name)

else :
conn = boto3.resource('s3', endpoint_url=os.getenv('S3_URL'), config=Config(signature_version='s3v4'))
# stop boto3 from automatically changing the endpoint
conn.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host)
exists = True
try:
conn.meta.client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e:
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
exists = False
else:
raise
elif os.getenv('DATABASE_STORAGE') == "swift":
conn = swiftclient.Connection(
user=os.getenv('SWIFT_USER'),
key=os.getenv('SWIFT_PASSWORD'),
authurl=os.getenv('SWIFT_AUTHURL'),
auth_version=os.getenv('SWIFT_AUTH_VERSION'),
tenant_name=os.getenv('SWIFT_TENANT')
)
# swift also does not throw exception if container already exists.
conn.put_container(os.getenv('BUCKET_NAME'))

if not exists:
conn.create_bucket(Bucket=bucket_name)
else:
conn = boto3.resource('s3', endpoint_url=os.getenv('S3_URL'), config=Config(signature_version='s3v4'))
# stop boto3 from automatically changing the endpoint
conn.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host)
exists = True
try:
conn.meta.client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e:
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
exists = False
else:
raise

if not exists:
conn.create_bucket(Bucket=bucket_name)
15 changes: 15 additions & 0 deletions rootfs/docker-entrypoint-initdb.d/001_setup_envdir.sh
Original file line number Diff line number Diff line change
Expand Up @@ -40,4 +40,19 @@ elif [ "$DATABASE_STORAGE" == "azure" ]; then
echo $WABS_ACCESS_KEY > WABS_ACCESS_KEY
echo "wabs://$BUCKET_NAME" > WALE_WABS_PREFIX
echo $BUCKET_NAME > BUCKET_NAME
elif [ "$DATABASE_STORAGE" == "swift" ]; then
SWIFT_USER=$(cat /var/run/secrets/deis/objectstore/creds/username)
SWIFT_PASSWORD=$(cat /var/run/secrets/deis/objectstore/creds/password)
SWIFT_TENANT=$(cat /var/run/secrets/deis/objectstore/creds/tenant)
SWIFT_AUTHURL=$(cat /var/run/secrets/deis/objectstore/creds/authurl)
SWIFT_AUTH_VERSION=$(cat /var/run/secrets/deis/objectstore/creds/authversion)
BUCKET_NAME=$(cat /var/run/secrets/deis/objectstore/creds/database-container)
# set defaults for variables that we can guess at
echo $SWIFT_USER > SWIFT_USER
echo $SWIFT_PASSWORD > SWIFT_PASSWORD
echo $SWIFT_TENANT > SWIFT_TENANT
echo $SWIFT_AUTHURL > SWIFT_AUTHURL
echo $SWIFT_AUTH_VERSION > SWIFT_AUTH_VERSION
echo "swift://$BUCKET_NAME" > WALE_SWIFT_PREFIX
echo $BUCKET_NAME > BUCKET_NAME
fi

0 comments on commit b66069b

Please sign in to comment.