version: 59
description: Add a task_queue_id column to the tasks table and populate it via an online migration
migrationScript: |-
begin
alter table tasks add column task_queue_id text;
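-- The new column starts out null on existing rows.  The
-- online_migration_v59_* functions below let the migration framework
-- back-fill it in the background: the batch function is presumably
-- called repeatedly with the state it returned on the previous call,
-- until the is_complete function reports that nothing is left to do.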
create function online_migration_v59_batch(batch_size_in integer, state_in jsonb)
returns table (count integer, state jsonb) as $$
declare
item record;
count integer;
last_task_id text;
begin
count := 0;
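-- Walk the tasks in task_id order, resuming after the last task_id
-- recorded in the previous batch's state (if any) and only touching
-- rows that have not been back-filled yet.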
for item in
select task_id
from tasks
where
(state_in ->> 'task_id' is null or task_id > state_in ->> 'task_id') and
task_queue_id is null
order by task_id
limit batch_size_in
loop
update tasks
set task_queue_id = tasks.provisioner_id || '/' || tasks.worker_type
where tasks.task_id = item.task_id;
last_task_id := item.task_id;
count := count + 1;
end loop;
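-- Report how many rows were updated and the last task_id visited,
-- so the next invocation can resume from that point.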
return query select
count as count,
jsonb_build_object('task_id', last_task_id) as state;
end
$$ language plpgsql;
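-- Completion check: the backfill is finished once no task row is left
-- with a null task_queue_id.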
create function online_migration_v59_is_complete() returns boolean as $$
begin
perform * from tasks where task_queue_id is null limit 1;
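-- PERFORM sets FOUND when at least one row matched, so "not found"
-- means every row has been back-filled.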
return not found;
end
$$ language plpgsql;
end
downgradeScript: |-
begin
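-- Dropping the column loses no data: provisioner_id and worker_type
-- are still populated on every row.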
alter table tasks drop column task_queue_id;
end
methods:
create_task:
description: |-
Create a new task, without scheduling it, and with empty values
for the status information.
mode: write
serviceName: queue
args: |-
task_id text,
provisioner_id text,
worker_type text,
scheduler_id text,
task_group_id text,
dependencies jsonb,
requires task_requires,
routes jsonb,
priority task_priority,
retries integer,
created timestamptz,
deadline timestamptz,
expires timestamptz,
scopes jsonb,
payload jsonb,
metadata jsonb,
tags jsonb,
extra jsonb
returns: void
body: |-
begin
insert
into tasks (
task_id,
provisioner_id,
worker_type,
task_queue_id,
scheduler_id,
task_group_id,
dependencies,
requires,
routes,
priority,
retries,
created,
deadline,
expires,
scopes,
payload,
metadata,
tags,
extra,
retries_left,
runs,
taken_until,
ever_resolved
)
values (
task_id,
provisioner_id,
worker_type,
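-- task_queue_id is stored redundantly, derived from the legacy
-- provisioner_id / worker_type pair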
provisioner_id || '/' || worker_type,
scheduler_id,
task_group_id,
dependencies,
requires,
routes,
priority,
retries,
created,
deadline,
expires,
scopes,
payload,
metadata,
tags,
extra,
-- default values for the mutable bits
retries,
jsonb_build_array(),
null, -- not taken
false
);
end
get_task:
description: |-
Get all properties of a task. Note that all properties but `runs`,
`retries_left`, and `taken_until` are immutable.
mode: read
serviceName: queue
args: task_id_in text
returns: |-
table (
task_id text,
provisioner_id text,
worker_type text,
scheduler_id text,
task_group_id text,
dependencies jsonb,
requires task_requires,
routes jsonb,
priority task_priority,
retries integer,
retries_left int,
created timestamptz,
deadline timestamptz,
expires timestamptz,
scopes jsonb,
payload jsonb,
metadata jsonb,
tags jsonb,
extra jsonb,
runs jsonb,
taken_until timestamptz
)
body: |-
begin
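-- Fall back to splitting task_queue_id for the legacy columns, so a
-- row carrying only the new column would still be returned with
-- provisioner_id and worker_type filled in.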
return query
select
tasks.task_id,
coalesce(tasks.provisioner_id, split_part(tasks.task_queue_id, '/', 1)),
coalesce(tasks.worker_type, split_part(tasks.task_queue_id, '/', 2)),
tasks.scheduler_id,
tasks.task_group_id,
tasks.dependencies,
tasks.requires,
tasks.routes,
tasks.priority,
tasks.retries,
tasks.retries_left,
tasks.created,
tasks.deadline,
tasks.expires,
tasks.scopes,
tasks.payload,
tasks.metadata,
tasks.tags,
tasks.extra,
tasks.runs,
tasks.taken_until
from tasks
where
tasks.task_id = task_id_in;
end
get_tasks_by_task_group:
description: |-
Get all properties of all tasks in the given task group.
deprecated: true
mode: read
serviceName: queue
args: task_group_id_in text, page_size_in integer, page_offset_in integer
returns: |-
table (
task_id text,
provisioner_id text,
worker_type text,
scheduler_id text,
task_group_id text,
dependencies jsonb,
requires task_requires,
routes jsonb,
priority task_priority,
retries integer,
retries_left int,
created timestamptz,
deadline timestamptz,
expires timestamptz,
scopes jsonb,
payload jsonb,
metadata jsonb,
tags jsonb,
extra jsonb,
runs jsonb,
taken_until timestamptz
)
body: |-
begin
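-- Same legacy-column fallback as get_task; paging relies on the
-- get_page_limit / get_page_offset helpers defined elsewhere in this
-- database.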
return query
select
tasks.task_id,
coalesce(tasks.provisioner_id, split_part(tasks.task_queue_id, '/', 1)),
coalesce(tasks.worker_type, split_part(tasks.task_queue_id, '/', 2)),
tasks.scheduler_id,
tasks.task_group_id,
tasks.dependencies,
tasks.requires,
tasks.routes,
tasks.priority,
tasks.retries,
tasks.retries_left,
tasks.created,
tasks.deadline,
tasks.expires,
tasks.scopes,
tasks.payload,
tasks.metadata,
tasks.tags,
tasks.extra,
tasks.runs,
tasks.taken_until
from tasks
where tasks.task_group_id = task_group_id_in
limit get_page_limit(page_size_in)
offset get_page_offset(page_offset_in);
end