-
Notifications
You must be signed in to change notification settings - Fork 20
/
Copy path: eessi-upload-to-staging
executable file
·268 lines (245 loc) · 8.66 KB
/
eessi-upload-to-staging
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
#!/bin/bash
#
# This file is part of the EESSI infrastructure,
# see https://github.com/EESSI/infrastructure
#
# author: Bob Droege (@bedroge)
# author: Terje Kvernes (@terjekv)
# author: Thomas Roeblitz (@trz42)
#
# license: GPLv2
#
# Upload a single file to a path inside an S3 bucket, optionally via a
# custom endpoint (needed for non-AWS S3 services such as minio).
#   $1 - path of the local file to upload
#   $2 - name of the target bucket
#   $3 - key (path) inside the bucket
#   $4 - endpoint url (empty string for plain AWS S3)
# Returns the exit code of the 'aws s3 cp' command.
function upload_to_staging_bucket
{
  local _file=$1
  local _bucket=$2
  local _path=$3
  local _endpoint_url=$4
  # build extra options as an array so the flag and its value stay
  # separate words without relying on unquoted word-splitting
  local -a _options=()
  if [[ -n "${_endpoint_url}" ]]; then
    _options=(--endpoint-url "${_endpoint_url}")
  fi
  aws "${_options[@]}" s3 cp "${_file}" "s3://${_bucket}/${_path}"
}
# Check that a filename looks like an EESSI layer tarball: it must start
# with 'eessi' and contain one of the layer types (compat|init|software).
# This needs expanding etc.
#   $1 - filename (basename) to validate
# Returns 0 when the name matches, 1 otherwise.
# NOTE(review): reconstructed the garbled 'echo $(unknown)' pipelines to
# test "${filename}", the only value in scope, matching the grep patterns.
function check_file_name
{
  local filename=$1
  if echo "${filename}" | grep -q '^eessi' &&
     echo "${filename}" | grep -q -E '(compat|init|software)'; then
    return 0
  else
    return 1
  fi
}
# Create a JSON metadata file describing an uploaded artefact.
#   $1 - path of the artefact
#   $2 - url at which the artefact will become available
#   $3 - repository name (ACCOUNT/REPONAME) the upload is linked to
#   $4 - pull request number the upload is linked to
#   $5 - identifier of the PR comment the upload is linked to
# Prints the path of the generated (temporary) metadata file on stdout.
# Note: queries https://checkip.amazonaws.com for the uploader's public IP.
function create_metadata_file
{
  local _artefact=$1
  local _url=$2
  local _repository=$3
  local _pull_request_number=$4
  local _pull_request_comment_id=$5

  local _tmpfile
  _tmpfile=$(mktemp)

  # all command substitutions are quoted so their output stays one word;
  # the trailing comma after the link2pr object was removed (invalid jq)
  jq -n \
    --arg un "$(whoami)" \
    --arg ip "$(curl -s https://checkip.amazonaws.com)" \
    --arg hn "$(hostname -f)" \
    --arg fn "$(basename ${_artefact})" \
    --arg sz "$(du -b "${_artefact}" | awk '{print $1}')" \
    --arg ct "$(date -r "${_artefact}")" \
    --arg sha256 "$(sha256sum "${_artefact}" | awk '{print $1}')" \
    --arg url "${_url}" \
    --arg repo "${_repository}" \
    --arg pr "${_pull_request_number}" \
    --arg pr_comment_id "${_pull_request_comment_id}" \
    '{
       uploader: {username: $un, ip: $ip, hostname: $hn},
       payload: {filename: $fn, size: $sz, ctime: $ct, sha256sum: $sha256, url: $url},
       link2pr: {repo: $repo, pr: $pr, pr_comment_id: $pr_comment_id}
     }' > "${_tmpfile}"
  echo "${_tmpfile}"
}
# Print usage information for this script on stderr.
function display_help
{
  cat >&2 <<EOF
Usage: $0 [OPTIONS] <filenames>
 -a | --artefact-prefix PREFIX - a directory to which the artefact
 shall be uploaded; BASH variable
 expansion will be applied; arg '-l'
 lists variables that are defined at
 the time of expansion
 -e | --endpoint-url URL - endpoint url (needed for non AWS S3)
 -h | --help - display this usage information
 -i | --pr-comment-id - identifier of a PR comment; may be
 used to efficiently determine the PR
 comment to be updated during the
 ingestion procedure
 -l | --list-variables - list variables that are available
 for expansion
 -m | --metadata-prefix PREFIX - a directory to which the metadata
 file shall be uploaded; BASH variable
 expansion will be applied; arg '-l'
 lists variables that are defined at
 the time of expansion
 -n | --bucket-name BUCKET - bucket name (same as BUCKET above)
 -p | --pull-request-number INT - a pull request number (INT); used to
 link the upload to a PR
 -r | --repository FULL_NAME - a repository name ACCOUNT/REPONAME;
 used to link the upload to a PR
EOF
}
# at least one filename argument is required; otherwise show usage and bail
if [[ $# -lt 1 ]]; then
display_help
exit 1
fi
# process command line args
POSITIONAL_ARGS=()
# base url under which uploads become available;
# how it is built depends on which service hosts the bucket
# minio: https://MINIO_SERVER:MINIO_PORT/{bucket_name}/
# s3aws: https://{bucket_name}.s3.amazonaws.com/
# should be constructable from endpoint_url and bucket_name
bucket_base=
# default bucket is eessi-staging
bucket_name="eessi-staging"
# provided via options in the bot's config file app.cfg
endpoint_url=
# provided via command line arguments
pr_comment_id="none"
pull_request_number="none"
github_repository="EESSI/software-layer"
# provided via options in the bot's config file app.cfg and/or command line argument
metadata_prefix=
artefact_prefix=
# other variables
legacy_aws_path=
# names of variables offered for '-a'/'-m' prefix expansion (see '-l' below)
variables="github_repository legacy_aws_path pull_request_number"
# options taking a value consume two arguments (hence 'shift 2');
# anything that is not an option is collected into POSITIONAL_ARGS
while [[ $# -gt 0 ]]; do
case $1 in
-a|--artefact-prefix)
artefact_prefix="$2"
shift 2
;;
-e|--endpoint-url)
endpoint_url="$2"
shift 2
;;
-h|--help)
display_help
exit 0
;;
-l|--list-variables)
echo "variables that will be expanded: name (default value)"
# ${!var} is indirect expansion: the value of the variable named by 'var'
for var in ${variables}
do
echo " ${var} (${!var:-unset})"
done
exit 0
;;
-i|--pr-comment-id)
pr_comment_id="$2"
shift 2
;;
-m|--metadata-prefix)
metadata_prefix="$2"
shift 2
;;
-n|--bucket-name)
bucket_name="$2"
shift 2
;;
-p|--pull-request-number)
pull_request_number="$2"
shift 2
;;
-r|--repository)
github_repository="$2"
shift 2
;;
-*|--*)
echo "Error: Unknown option: $1" >&2
exit 1
;;
*) # No more options
POSITIONAL_ARGS+=("$1") # save positional arg
shift
;;
esac
done
# restore potentially parsed filename(s) into $*
set -- "${POSITIONAL_ARGS[@]}"
# Derive the base url under which uploaded files become available:
#   endpoint_url unset -> assume AWS S3, virtual-hosted style url:
#                         https://${bucket_name}.s3.amazonaws.com
#   endpoint_url set   -> assume non AWS S3 (eg minio), or AWS S3 with a
#                         bucket not in DNS; path-style url:
#                         ${endpoint_url}/${bucket_name}
if [[ -n "${endpoint_url}" ]]; then
  bucket_base="${endpoint_url}/${bucket_name}"
else
  bucket_base="https://${bucket_name}.s3.amazonaws.com"
fi
# Process every remaining argument as a file to upload.
# FIX: iterate over "$@" (one word per filename); the previous "$*"
# joined all filenames into a single word, breaking multi-file uploads.
for file in "$@"; do
  # the file must be an existing, readable, non-empty regular file
  if [[ -r "${file}" && -f "${file}" && -s "${file}" ]]; then
    basefile=$(basename "${file}")
    if check_file_name "${basefile}"; then
      # verify the file really is a readable tar archive
      if tar tf "${file}" | head -n1 > /dev/null; then
        # 'legacy_aws_path' might be used in artefact_prefix or metadata_prefix
        # its purpose is to support the old/legacy method to derive the location
        # where to store the artefact and metadata file
        # (assignment split from 'export' so the pipeline's status isn't masked)
        legacy_aws_path=$(basename "${file}" | tr -s '-' '/' \
          | perl -pe 's/^eessi.//;' | perl -pe 's/\.tar\.gz$//;')
        export legacy_aws_path
        if [[ -z "${artefact_prefix}" ]]; then
          aws_path=${legacy_aws_path}
        else
          # exported so envsubst can expand them in the prefix template
          export pull_request_number
          export github_repository
          aws_path=$(envsubst <<< "${artefact_prefix}")
        fi
        aws_file=$(basename "${file}")
        echo "Creating metadata file"
        url="${bucket_base}/${aws_path}/${aws_file}"
        echo "create_metadata_file file=${file} url=${url} github_repository=${github_repository} pull_request_number=${pull_request_number} pr_comment_id=${pr_comment_id}"
        metadata_file=$(create_metadata_file "${file}" \
                                             "${url}" \
                                             "${github_repository}" \
                                             "${pull_request_number}" \
                                             "${pr_comment_id}")
        echo "metadata:"
        cat "${metadata_file}"
        echo Uploading to "${url}"
        echo " store artefact at ${aws_path}/${aws_file}"
        upload_to_staging_bucket \
          "${file}" \
          "${bucket_name}" \
          "${aws_path}/${aws_file}" \
          "${endpoint_url}"
        # the metadata file may go to a different prefix than the artefact
        if [[ -z "${metadata_prefix}" ]]; then
          aws_path=${legacy_aws_path}
        else
          export pull_request_number
          export github_repository
          aws_path=$(envsubst <<< "${metadata_prefix}")
        fi
        echo " store metadata file at ${aws_path}/${aws_file}.meta.txt"
        upload_to_staging_bucket \
          "${metadata_file}" \
          "${bucket_name}" \
          "${aws_path}/${aws_file}.meta.txt" \
          "${endpoint_url}"
      else
        echo "'${file}' is not a tar file."
        exit 1
      fi
    else
      echo "${file} does not look like an eessi layer filename!"
      exit 1
    fi
  else
    echo "'${file}' is not a readable non zero-sized file."
    exit 1
  fi
done