forked from DataDog/datadog-lambda-python
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: sign_layers.sh
More file actions
executable file
·111 lines (94 loc) · 3.41 KB
/
sign_layers.sh
File metadata and controls
executable file
·111 lines (94 loc) · 3.41 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
#!/bin/bash
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019 Datadog, Inc.
# Exit on error; also fail a pipeline when any stage fails (the signing
# loop below pipes `aws` into `jq`, and without pipefail an aws failure
# would be silently masked by jq's exit status).
set -e
set -o pipefail

# Directory that holds the locally built layer zips to be signed.
readonly LAYER_DIR=".layers"

# One zip per supported Python runtime; each is signed in turn.
readonly LAYER_FILES=(
    "datadog_lambda_py2.7.zip"
    "datadog_lambda_py3.6.zip"
    "datadog_lambda_py3.7.zip"
    "datadog_lambda_py3.8.zip"
    "datadog_lambda_py3.9.zip"
)

# AWS Signer profile used for all signing jobs.
readonly SIGNING_PROFILE_NAME="DatadogLambdaSigningProfile"
# Check account parameter ($1 must be "sandbox" or "prod") and derive the
# signing region and S3 bucket from it.
#
# NOTE: the previous check, `[[ "${VALID_ACCOUNTS[@]}" =~ $1 ]]`, performed a
# substring match against the joined array, so partial inputs such as "sand",
# "box", or "od" were accepted and fell through with REGION/S3_BUCKET_NAME
# unset. An exact `case` match closes that hole.
ACCOUNT="${1:-}"

if [ -z "$ACCOUNT" ]; then
    echo "ERROR: You must pass an account parameter to sign the layers"
    exit 1
fi

case "$ACCOUNT" in
    sandbox)
        REGION="sa-east-1"
        S3_BUCKET_NAME="dd-lambda-signing-bucket-sandbox"
        ;;
    prod)
        REGION="us-east-1"
        S3_BUCKET_NAME="dd-lambda-signing-bucket"
        ;;
    *)
        echo "ERROR: The account parameter was invalid. Please choose sandbox or prod."
        exit 1
        ;;
esac
# For each layer zip: upload to S3, run an AWS Signer job, poll until it
# finishes (60s timeout), replace the local zip with the signed artifact,
# and clean up both S3 objects. All expansions are quoted (the originals
# were unquoted, which breaks on empty values — e.g. the `[ $STATUS = … ]`
# tests — and on paths containing whitespace).
for LAYER_FILE in "${LAYER_FILES[@]}"
do
    echo
    echo "${LAYER_FILE}"
    echo "-------------------------"

    LAYER_LOCAL_PATH="${LAYER_DIR}/${LAYER_FILE}"

    # Upload the layer to S3 for signing. A fresh UUID keys the unsigned
    # object so concurrent runs cannot collide.
    echo "Uploading layer to S3 for signing..."
    UUID=$(uuidgen)
    S3_UNSIGNED_ZIP_KEY="${UUID}.zip"
    S3_UNSIGNED_ZIP_URI="s3://${S3_BUCKET_NAME}/${S3_UNSIGNED_ZIP_KEY}"
    aws s3 cp "${LAYER_LOCAL_PATH}" "${S3_UNSIGNED_ZIP_URI}"

    # Start a signing job and capture its job ID from the JSON response.
    echo "Starting the signing job..."
    SIGNING_JOB_ID=$(aws signer start-signing-job \
        --source "s3={bucketName=${S3_BUCKET_NAME},key=${S3_UNSIGNED_ZIP_KEY},version=null}" \
        --destination "s3={bucketName=${S3_BUCKET_NAME}}" \
        --profile-name "${SIGNING_PROFILE_NAME}" \
        --region "${REGION}" \
        | jq -r '.jobId'\
    )

    # Poll the job every 3 seconds until it succeeds, fails, or 60 seconds
    # have elapsed.
    echo "Waiting for the signing job to complete..."
    SECONDS_WAITED_SO_FAR=0
    while :
    do
        sleep 3
        SECONDS_WAITED_SO_FAR=$((SECONDS_WAITED_SO_FAR + 3))

        SIGNING_JOB_DESCRIPTION=$(aws signer describe-signing-job \
            --job-id "${SIGNING_JOB_ID}" \
            --region "${REGION}"\
        )
        # Quote the JSON before piping so jq sees it byte-for-byte
        # (unquoted echo would collapse its whitespace).
        SIGNING_JOB_STATUS=$(echo "${SIGNING_JOB_DESCRIPTION}" | jq -r '.status')
        SIGNING_JOB_STATUS_REASON=$(echo "${SIGNING_JOB_DESCRIPTION}" | jq -r '.statusReason')

        if [[ "${SIGNING_JOB_STATUS}" == "Succeeded" ]]; then
            echo "Signing job succeeded!"
            break
        fi

        if [[ "${SIGNING_JOB_STATUS}" == "Failed" ]]; then
            echo "ERROR: Signing job failed"
            echo "${SIGNING_JOB_STATUS_REASON}"
            exit 1
        fi

        if [[ "${SECONDS_WAITED_SO_FAR}" -ge 60 ]]; then
            echo "ERROR: Timed out waiting for the signing job to complete"
            exit 1
        fi

        echo "Signing job still in progress..."
    done

    # Download the signed ZIP, overwriting the original ZIP. Signer names
    # the output object after the job ID.
    echo "Replacing the local layer with the signed layer from S3..."
    S3_SIGNED_ZIP_KEY="${SIGNING_JOB_ID}.zip"
    S3_SIGNED_ZIP_URI="s3://${S3_BUCKET_NAME}/${S3_SIGNED_ZIP_KEY}"
    aws s3 cp "${S3_SIGNED_ZIP_URI}" "${LAYER_LOCAL_PATH}"

    # Delete the signed and unsigned ZIPs in S3 so the bucket stays clean.
    echo "Cleaning up the S3 bucket..."
    aws s3api delete-object --bucket "${S3_BUCKET_NAME}" --key "${S3_UNSIGNED_ZIP_KEY}"
    aws s3api delete-object --bucket "${S3_BUCKET_NAME}" --key "${S3_SIGNED_ZIP_KEY}"
done

echo
echo "Successfully signed all layers!"