#!/bin/bash
#
# $1 = test repo number
#
# Force local defaults so this can also be run from the command line
DISTRELEASE=${DISTRELEASE-ubuntu-precise}
# PACKAGE_REPO=${PACKAGE_REPO-proposed-packages}
AUTHTYPE=${AUTHTYPE-swauth}
[ -e $(dirname $0)/jenkins-deb-common ] || exit 1
. $(dirname $0)/jenkins-deb-common
jenkins_init
jenkins_set_vars
tmp_chef_env=${BINARY_BUILD_RELEASE-precise}-${AUTHTYPE-keystone}-${PACKAGE-tty}
chef_env=$(echo ${tmp_chef_env} | tr 'A-Z' 'a-z')
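
# A hypothetical command-line invocation (values are examples only; AUTHTYPE,
# DISTRELEASE and NOCLEAN are variables this script reads, while BINARY_BUILD_*
# and PACKAGE are presumably provided by jenkins_set_vars when run under Jenkins):
#   AUTHTYPE=keystone DISTRELEASE=ubuntu-precise NOCLEAN=0 ./jenkins-cs-swift-five-node 1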
function err_cleanup_chef() {
    echo "*** In the trap handler err_cleanup_chef"

    if [ "${NOCLEAN-0}" == "1" ]; then
        exit 0
    fi

    knife environment delete ${chef_env} -y || :

    for host in ${host_info[@]}; do
        name=$(echo $host | cut -d':' -f1)
        role=$(echo $host | cut -d':' -f2)
        ip=$(echo $host | cut -d':' -f3)

        echo "capturing stack trace output if it exists. Errors are OK here"
        ssh root@${ip} 'cat /var/chef/cache/chef-stacktrace.out || :'

        echo "destroying $host"
        # Delete node and client from chef-server
        knife node delete ${name} -y || :
        knife client delete ${name} -y || :

        for vd in $(echo {b..d}); do
            sudo virsh detach-disk ${name} vd${vd} || :
            sudo rm -f /tmp/${name}-${vd}.img || :
        done

        sudo virsh destroy ${name}
        sleep 5
        sudo lvremove -f ${LVM_ROOT}/${name}

        for vd in $(echo {b..d}); do
            if [ -e "/tmp/${name}-${vd}.img" ]; then
                sudo rm -f /tmp/${name}-${vd}.img
            fi
        done
    done

    exit 1
}
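
# Note: setting NOCLEAN=1 in the environment makes the handler above exit before
# any teardown, leaving the VMs and chef objects in place for debugging a failed run.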
function get_ip_by_role() {
    # $1 - role
    # Sets T_IP to the IP of a host_info entry whose role list contains role[$1]
    for host in ${host_info[@]}; do
        role=$(echo $host | cut -d':' -f2)
        ip=$(echo $host | cut -d':' -f3)

        OLD_IFS=$IFS
        IFS=","
        for var in ${role}; do
            if [ "role[${1}]" == "${var}" ]; then
                T_IP=${ip}
            fi
        done
        IFS=$OLD_IFS
    done
}
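
# Usage sketch (this mirrors the test section near the end of this script):
#   get_ip_by_role "swift-proxy-server"
#   proxy_ip=$T_IP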
# This is an array of the roles you want to build for the test.
# Valid entries are in the form of <hostname>:role[<role>],...
# The naming of these is extremely important - they must match the chef roles.
declare -a nodes

if [ ${AUTHTYPE} == "keystone" ]; then
    nodes=( node1:role[base],role[mysql-master],role[keystone],role[swift-management-server]
            node2:role[base],role[swift-proxy-server],recipe[kong],recipe[exerstack]
            node3:role[base],role[swift-object-server],role[swift-container-server],role[swift-account-server]
            node4:role[base],role[swift-object-server],role[swift-container-server],role[swift-account-server]
            node5:role[base],role[swift-object-server],role[swift-container-server],role[swift-account-server]
          )
else
    nodes=( node1:role[base],recipe[kong],recipe[exerstack]
            node2:role[base]
            node3:role[base]
          )
fi
chef_env_set=0

# Let's create the chef environment
tmp_env=$(mktemp)
cat ~/jenkins-build/files/${AUTHTYPE}-swift-environment.json | sed -e "s/REPLACEME/${chef_env}/" > ${tmp_env}.json

if ! ( knife environment from file ${tmp_env}.json ); then
    echo "Unable to create environment from ${tmp_env}.json"
    exit 1
else
    rm ${tmp_env}.json
    chef_env_set=1
    knife environment show ${chef_env} -Fj
fi
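
# The environment template itself is not part of this script; a minimal sketch of
# what a Chef environment file with a REPLACEME name placeholder could look like
# (hypothetical - the real ${AUTHTYPE}-swift-environment.json will also carry
# swift/auth-specific attributes):
#   {
#     "name": "REPLACEME",
#     "json_class": "Chef::Environment",
#     "chef_type": "environment",
#     "description": "swift five-node test environment",
#     "override_attributes": { }
#   }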
# Let's configure name and ip for each of the nodes
declare -a host_info

for node in ${nodes[@]}; do
    name=$(echo $node | cut -d':' -f1)
    role=$(echo $node | cut -d':' -f2)
    node_name=${chef_env}-$name

    get_ip $node_name
    node_ip=${IP}

    echo "building host_info ${node_name}:${role}:${node_ip}"
    host_info[${#host_info[@]}]="${node_name}:${role}:${node_ip}"
done
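
# Each host_info entry has the form <node_name>:<role-list>:<ip>. With the fallback
# defaults above (chef_env "precise-swauth-tty") the first entry would look roughly
# like this (the IP is a made-up example):
#   precise-swauth-tty-node1:role[base],recipe[kong],recipe[exerstack]:10.0.0.11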
# global memory spec for all vms built
MEMORY=2048000

# Spin up VMs
for host in ${host_info[@]}; do
    echo $host
    name=$(echo $host | cut -d':' -f1)
    role=$(echo $host | cut -d':' -f2)
    ip=$(echo $host | cut -d':' -f3)

    maybe_make_kvm_instance $name
    start_kvm_instance $name $ip http://archive.ubuntu.com/ubuntu precise main

    # Load the ACPI PCI hotplug driver so hot-added disks are visible in the guest
    ssh root@${ip} "modprobe acpiphp"

    if [[ ${role} =~ "container-server" ]] ||
       [[ ${role} =~ "object-server" ]] ||
       [[ ${role} =~ "account-server" ]]; then
        # Storage nodes get three sparse 1GB images attached as vdb/vdc/vdd
        for vd in $(echo {b..d}); do
            sudo dd if=/dev/zero of=/tmp/${name}-${vd}.img bs=1M seek=1024 count=0
            sync
            # Force a SCSI bus rescan in the guest so the new disk is detected
            ssh root@${ip} 'find /sys -path "*pci*scsi*" -name "scan" | xargs -i -n1 /bin/bash -c "echo - - - > {}"'
            sleep 1
            sudo virsh attach-disk ${name} /tmp/${name}-${vd}.img vd${vd}
        done
    fi

    ssh root@${ip} "cat /proc/partitions"
done
trap "err_cleanup_chef" SIGINT SIGTERM ERR
for host in ${!host_info[@]}; do
    name=$(echo ${host_info[$host]} | cut -d':' -f1)
    role=$(echo ${host_info[$host]} | cut -d':' -f2)
    ip=$(echo ${host_info[$host]} | cut -d':' -f3)

    # ssh root@${ip} "ifconfig eth0"

    if [ ${BINARY_BUILD_VARIANT} = "debian" ]; then
        ssh root@${ip} apt-get update -y --force-yes
        ssh root@${ip} "DEBIAN_FRONTEND=noninteractive apt-get upgrade -y --force-yes"
        ssh root@${ip} apt-get install vlan -y --force-yes
    else
        ssh root@${ip} rm -f /etc/cron.daily/man-db.cron
        # ssh root@${ip} "/sbin/iptables -I INPUT 1 -s 192.168.1.0/24 -j ACCEPT"
        # ssh root@${ip} "/sbin/iptables -I INPUT 1 -s 192.168.100.0/24 -j ACCEPT"
        ssh root@${ip} yum -y --exclude=kernel* update
        ssh root@${ip} yum install -y yum-fastestmirror redhat-lsb
        # ssh root@${ip} modprobe nbd
        ssh root@${ip} "/sbin/iptables -F"
        ssh root@${ip} "/sbin/sysctl -w net.ipv4.ip_forward=1"
    fi
    # ssh root@${ip} vconfig add eth0 100
    # ssh root@${ip} vconfig add eth0 101

    if [ ${BINARY_BUILD_VARIANT} = "debian" ]; then
        # The cgroup-lite install occasionally fails on the first attempt, so retry
        # it up to three times, remounting cgroups between attempts
        count=1
        while ! ssh root@${ip} "DEBIAN_FRONTEND=noninteractive apt-get install cgroup-lite -y --force-yes"; do
            if [ $count -gt 3 ]; then
                echo "Unable to install cgroup-lite"
                exit 1
            fi
            # Failures here are ignored; cgroups may already be mounted
            if ! ( ssh root@${ip} "/usr/bin/cgroups-mount" ); then
                :
            fi
            sleep 2
            count=$(( count + 1 ))
        done
    fi
    # Install system pre-reqs
    if [ ${BINARY_BUILD_VARIANT} = "debian" ]; then
        ssh root@${ip} "apt-get install -y wget curl build-essential automake"
    else
        ssh root@${ip} "yum install -y wget tar"
        # ssh root@${ip} "mkdir RPMS; cd RPMS; wget http://184.106.53.105/RPMS/euca2ools-2.0.1-0.1.fc17.noarch.rpm"
        # ssh root@${ip} "cd RPMS; wget http://184.106.53.105/RPMS/python-boto-2.1.1-0.2.fc17.noarch.rpm"
        # ssh root@${ip} "cd RPMS; yum install -y *.rpm"
    fi

    # Try to quiet installer
    # ssh root@${ip} 'echo "alias wget=\"wget -q\"" >> ~/.bashrc '

    # Install ruby/chef via Opscode's full-stack installer
    if ! ( ssh root@${ip} "bash < <(curl -s http://s3.amazonaws.com/opscode-full-stack/install.sh)" ); then
        echo "Unable to install chef via the Opscode full-stack installer"
        err_cleanup_chef
        exit 1
    fi
    # Setup Hosted Chef
    # ssh root@${ip} 'wget -q http://c308412.r12.cf1.rackcdn.com/chef.tar.gz; tar -xvzf chef.tar.gz; knife configure client ./client-config'

    # Set up chef to use our jenkins server
    ssh root@${ip} 'wget -q http://c308412.r12.cf1.rackcdn.com/chef-server.tgz; tar -xvzf chef-server.tgz; knife configure client ./client-config'
    ssh root@${ip} "echo 'environment \"${chef_env}\"' >> ~/client-config/client.rb"

    # TODO: DELETE ME
    # if [ $chef_env_set -eq 0 ]; then
    #     EDITOR=/bin/true knife environment create ${chef_env} -d ${chef_env}
    #     chef_env_set=1
    # fi

    if ! ( ssh root@${ip} "mkdir /etc/chef; cp -r ~/client-config/* /etc/chef/; chef-client" ); then
        echo "Unable to register with chef-server"
        err_cleanup_chef
        exit 1
    fi
    # Add swift-specific node attributes: inject a swift.zone value (zone = node
    # index + 1) into the node's normal attributes so each node ends up in its own
    # swift zone
    node_json=$(mktemp)
    zone=$(( $host + 1 ))
    knife node show ${name} -Fj | sed -e "s/\"normal\": {/\"json_class\": \"Chef::Node\",\"normal\": { \"swift\": { \"zone\": $zone },/" > ${node_json}.json

    if ! ( knife node from file ${node_json}.json ); then
        echo "Unable to update node attributes from ${node_json}.json"
        err_cleanup_chef
        exit 1
    else
        rm ${node_json}.json
        knife node show ${name} -Fj
    fi

    # Assign the node's run list, one role/recipe at a time
    OLD_IFS=$IFS
    IFS=","
    for role_variant in ${role}; do
        knife node run_list add ${name} ${role_variant}
    done
    IFS=$OLD_IFS

    ssh root@${ip} "chef-client"
done
sleep 20s

# Swift needs a bunch of chef-client runs to converge
for x in 1 2 3 4; do
    for host in ${!host_info[@]}; do
        name=$(echo ${host_info[$host]} | cut -d':' -f1)
        role=$(echo ${host_info[$host]} | cut -d':' -f2)
        ip=$(echo ${host_info[$host]} | cut -d':' -f3)

        echo "=== RUN ${x} for ${name} ==="
        ssh root@${ip} "chef-client"

        echo "=== SERVICE STATUS RUN ${x} for ${name}"
        ssh root@${ip} "netstat -ntpl"
        if ! (ssh root@${ip} "ps auxw | grep swif[t]"); then
            echo "No swift services running"
        fi

        echo "Sleeping to wait for solr to catch up"
        sleep 20
    done
done
echo "running tests against the swift-management node"
get_ip_by_role "swift-proxy-server"
proxy_ip=$T_IP
# uncomment this once swift is deployable
if ! ( ssh root@${proxy_ip} 'cd /opt/exerstack; ./exercise.sh essex-final keystone.sh swift.sh' ); then
echo "Exerstack test suite failed"
err_cleanup_chef
exit 1
fi
if ! ( ssh root@${proxy_ip} 'cd /opt/kong; ./run_tests.sh --version essex-final --swift' ); then
echo "Kong test suite failed"
err_cleanup_chef
exit 1
fi
trap - SIGINT SIGTERM EXIT ERR

# Clean up after the run
knife environment delete ${chef_env} -y || :

for host in ${host_info[@]}; do
    name=$(echo $host | cut -d':' -f1)
    role=$(echo $host | cut -d':' -f2)
    ip=$(echo $host | cut -d':' -f3)

    # Delete node and client from chef-server
    knife node delete ${name} -y || :
    knife client delete ${name} -y || :

    for vd in $(echo {b..d}); do
        if [ -e /tmp/${name}-${vd}.img ]; then
            sudo virsh detach-disk ${name} vd${vd}
            sudo rm -f /tmp/${name}-${vd}.img
        fi
    done

    sudo virsh destroy ${name}
    sleep 5
    sudo lvremove -f ${LVM_ROOT}/${name}
done