dosvob.py
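"""Back up DigitalOcean block-storage volumes.

For each volume on the account, the script snapshots the volume, clones the
snapshot into a temporary volume, attaches that volume to an ephemeral worker
droplet, and syncs the raw block device down to backups/ with diskrsync.
A glacier pass then runs over the local copies, and everything tagged
dosvob-ephemeral is torn down whether the run succeeded or not.
"""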
import datetime
import json
import os
import pathlib
import time

import requests

import api
from glacier import do_glacier_pass
from util import execute

dosvob_ephemeral_tag = "dosvob-ephemeral"
has_dosvob_ephemeral_tag = lambda item: dosvob_ephemeral_tag in item["tags"]
conf = json.loads(pathlib.Path("conf.json").read_text())
token = conf["do_token"]
region = conf["region"]
dosvob_key_name = f"{dosvob_ephemeral_tag}-key"
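# An illustrative conf.json, inferred from the keys this script reads
# (all values here are placeholders; "snapshot_retention" is passed through
# to do_glacier_pass as-is):
# {
#     "do_token": "dop_v1_...",
#     "region": "nyc3",
#     "healthchecks": "https://hc-ping.com/<uuid>",
#     "snapshot_retention": 7
# }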
# Healthchecks
if conf["healthchecks"] != "":
    requests.get(f"{conf['healthchecks']}/start", timeout=10)
# Setup
manager = api.BaseAPI(token = token)
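# api.BaseAPI is the local wrapper in api.py; judging by the calls below,
# manager.request(path, method, payload) maps straight onto the DigitalOcean
# v2 REST endpoints with the same paths.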
pathlib.Path('backups/glacier/history').mkdir(parents=True, exist_ok=True)
if not os.path.exists("backups/glacier/history/.git"):
    execute("git -C backups/glacier/history init")

# Cleans up everything related to dosvob's execution
def cleanup():
    # Clear droplets first because they might mount volumes
    for droplet in manager.request("droplets", "GET", { 'tag_name': dosvob_ephemeral_tag })["droplets"]:
        manager.request(f"droplets/{droplet['id']}", "DELETE")
    # Clear snapshots next to give volumes time to demount properly (this may or may not be helpful; if it is, solve it better)
    for snapshot in filter(has_dosvob_ephemeral_tag, manager.request("snapshots", "GET")["snapshots"]):
        manager.request(f"snapshots/{snapshot['id']}", "DELETE")
    # Clear volumes once the droplet is probably shut down
    # For some reason, tagging volumes doesn't seem to work right, so we do it with name prefixes instead
    for volume in filter(lambda item: item["name"].startswith(dosvob_ephemeral_tag), manager.request("volumes", "GET")["volumes"]):
        manager.request(f"volumes/{volume['id']}", "DELETE")
    # Clear SSH keys that we inserted
    actionresult = manager.request("account/keys", "GET")
    for key in actionresult["ssh_keys"]:
        if key["name"] == dosvob_key_name:
            manager.request(f"account/keys/{key['id']}", "DELETE")

try:
    # Waits for an action to be complete
    def waitfor(action_id):
        while True:
            actionresult = manager.request(f"actions/{action_id}", "GET")
            if actionresult["action"]["status"] == "completed":
                return
            elif actionresult["action"]["status"] == "in-progress":
                time.sleep(5)
            else:
                raise RuntimeError(f"Action {action_id} ended in unexpected status: {actionresult['action']['status']}")

    # Clean up first, just so we're not stomping all over an old process
    cleanup()

    # Insert our SSH key
    sshkeyid = manager.request("account/keys", "POST", {
        'name': dosvob_key_name,
        'public_key': pathlib.Path("~/.ssh/id_rsa.pub").expanduser().read_text(),
    })["ssh_key"]["id"]
    # Build the droplet that we'll be using for rsync
    workerresponse = manager.request("droplets", "POST", {
        'name': f'{dosvob_ephemeral_tag}-worker',
        'region': region,
        'image': 'debian-12-x64',
        'size': 's-1vcpu-1gb',
        'tags': [ dosvob_ephemeral_tag ],
        'ssh_keys': [ sshkeyid ],
    })
    workerid = workerresponse["droplet"]["id"]
    waitfor(workerresponse["links"]["actions"][0]["id"])
    # Droplet starts in a powered-on state
    # Get the external IP so we can connect to it
    workeriplist = manager.request(f"droplets/{workerid}", "GET")["droplet"]["networks"]["v4"]
    workerip = next(x for x in workeriplist if x["type"] == "public")["ip_address"]
    print(f"Worker found at IP {workerip}")
    # Install go, which we need for diskrsync; this also accepts our ssh key
    # We retry every second until this succeeds, because it takes a while for the server to come up
    while True:
        try:
            execute(f"ssh -o StrictHostKeyChecking=no root@{workerip} apt install -y golang")
            break
        except Exception:
            time.sleep(1)
    # And now get diskrsync copied over
    execute(f"scp /usr/local/bin/diskrsync root@{workerip}:/usr/local/bin/diskrsync")
    # utcnow() is deprecated as of Python 3.12; an aware UTC timestamp formats identically here
    snapshotslug = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%d-%H%M%S")
    # Traverse over all volumes
    volumes = manager.request("volumes", "GET")["volumes"]
    for volume in volumes:
        # Snapshot a volume
        # Volume name length is limited to 64, so we unfortunately have to truncate our embedded name
        name = f"{dosvob_ephemeral_tag}--{volume['name'][:12]}--{snapshotslug}"
        result = manager.request(f"volumes/{volume['id']}/snapshots", "POST", {
            'name': name,
            'tags': [ dosvob_ephemeral_tag ],
        })["snapshot"]

        # Make a volume from our snapshot
        snapshotid = result["id"]
        volumecopy = manager.request("volumes", "POST", {
            'name': name,
            'size_gigabytes': result["min_disk_size"],
            'snapshot_id': snapshotid,
            'tags': [ dosvob_ephemeral_tag ],
        })["volume"]
        volumecopyid = volumecopy["id"]

        # Wipe the snapshot
        manager.request(f"snapshots/{snapshotid}", "DELETE")

        # Attach the volume to our worker
        waitfor(manager.request(f"volumes/{volumecopyid}/actions", "POST", {
            'type': 'attach',
            'droplet_id': workerid,
        })["action"]["id"])

        # Volume always shows up as sda, so let's sync it over
        execute(f"diskrsync --verbose --calc-progress --sync-progress --no-compress root@{workerip}:/dev/sda backups/{volume['name']}")

        # Detach volume
        waitfor(manager.request(f"volumes/{volumecopyid}/actions", "POST", {
            'type': 'detach',
            'droplet_id': workerid,
        })["action"]["id"])

        # Delete volume
        manager.request(f"volumes/{volumecopyid}", "DELETE")
    do_glacier_pass("backups", conf["snapshot_retention"])

    if conf["healthchecks"] != "":
        requests.get(conf["healthchecks"], timeout=10)
except:  # deliberately bare so /fail gets pinged on any failure before re-raising
    print("Error! Cleaning up before returning.")
    if conf["healthchecks"] != "":
        requests.get(f"{conf['healthchecks']}/fail", timeout=10)
    raise
finally:
    # Clean up everything remaining
    cleanup()
    # this is here entirely so I can easily comment out the cleanup when I'm developing :V
    pass