# memcheck.yml
name: Memory Check
on:
# Tests must run for a PR to be valid and pass merge queue muster.
# On main, we want to see at a glance that all commits are passing; any deviation should help when bisecting errors.
# The merge queue checks should show on main and provide this clear test/passing history.
merge_group:
branches: [main, alpha*, beta*, rc*]
pull_request:
branches: ["*"]
env:
SAFE_DATA_PATH: /home/runner/.local/share/safe
CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
NODE_DATA_PATH: /home/runner/.local/share/safe/node
BOOTSTRAP_NODE_DATA_PATH: /home/runner/.local/share/safe/bootstrap_node
RESTART_TEST_NODE_DATA_PATH: /home/runner/.local/share/safe/restart_node
FAUCET_LOG_PATH: /home/runner/.local/share/safe/test_faucet/logs
jobs:
memory-check:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Check we're on the right commit
run: git log -1 --oneline
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
continue-on-error: true
- name: install ripgrep
shell: bash
run: sudo apt-get install -y ripgrep
- name: Build binaries
run: cargo build --release --bin safe --bin safenode
timeout-minutes: 30
- name: Build faucet binary with gifting
run: cargo build --release --bin faucet --features gifting
timeout-minutes: 30
- name: Build tests
run: cargo test --release -p sn_node --test data_with_churn --test verify_routing_table --no-run
timeout-minutes: 30
- name: Start a node instance that does not undergo churn
run: |
mkdir -p $BOOTSTRAP_NODE_DATA_PATH
./target/release/safenode --first \
--root-dir $BOOTSTRAP_NODE_DATA_PATH --log-output-dest $BOOTSTRAP_NODE_DATA_PATH --local --owner=bootstrap &
sleep 10
env:
SN_LOG: "all"
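# Extract the bootstrap node's listen address from its logs so later steps can
# join the network. The pipeline assumes the node logs a line roughly like
# (hypothetical example):
#   Local node is listening ... on "/ip4/127.0.0.1/udp/12345/..."
# The first rg finds that line, the second pulls the quoted /ip4 multiaddr,
# and the third strips the quotes via an empty replacement.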
- name: Set SAFE_PEERS
run: |
safe_peers=$(rg "Local node is listening .+ on \".+\"" $BOOTSTRAP_NODE_DATA_PATH -u | \
rg '/ip4.*$' -m1 -o | rg '"' -r '')
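# Appending KEY=value to $GITHUB_ENV exports the variable to all subsequent steps.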
echo "SAFE_PEERS=$safe_peers" >> $GITHUB_ENV
- name: Check SAFE_PEERS was set
shell: bash
run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS"
- name: Start a node instance to be restarted
run: |
mkdir -p $RESTART_TEST_NODE_DATA_PATH
./target/release/safenode \
--root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restart &
sleep 10
env:
SN_LOG: "all"
- name: Start a local network
env:
SN_LOG: "all"
uses: maidsafe/sn-local-testnet-action@main
with:
action: start
build: true
faucet-path: target/release/faucet
interval: 2000
join: true
node-path: target/release/safenode
owner-prefix: node
platform: ubuntu-latest
set-safe-peers: false
# In this case we do *not* want SAFE_PEERS to be overwritten when the testnet is started
- name: Check SAFE_PEERS was not changed
shell: bash
run: echo "The SAFE_PEERS variable has been set to ${SAFE_PEERS}"
- name: Create and fund a wallet to pay for files storage
run: |
echo "Obtaining address for use with the faucet..."
address=$(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1)
echo "Sending tokens to the faucet at $address"
./target/release/faucet --log-output-dest=data-dir send 5000000 $address > initial_balance_from_faucet.txt
cat initial_balance_from_faucet.txt
cat initial_balance_from_faucet.txt | tail -n 1 > transfer_hex
cat transfer_hex
./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
env:
SN_LOG: "all"
timeout-minutes: 15
- name: Move faucet log to the working folder
run: |
echo "SAFE_DATA_PATH has: "
ls -l $SAFE_DATA_PATH
echo "test_faucet foder has: "
ls -l $SAFE_DATA_PATH/test_faucet
echo "logs folder has: "
ls -l $SAFE_DATA_PATH/test_faucet/logs
mv $FAUCET_LOG_PATH/faucet.log ./faucet_log.log
continue-on-error: true
if: always()
timeout-minutes: 1
- name: Download 95mb file to be uploaded with the safe client
shell: bash
run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip
# The resource file we upload may change, and with it memory consumption.
# Be aware!
- name: Start a client to upload files
# -p makes files public
run: |
ls -l
./target/release/safe --log-output-dest=data-dir files upload "./the-test-data.zip" --retry-strategy quick -p
env:
SN_LOG: "all"
timeout-minutes: 25
# this check needs to be after some transfer activity
- name: Check the restart node is warned about using default genesis
run: |
git log -1 --oneline
ls -la $RESTART_TEST_NODE_DATA_PATH
cat $RESTART_TEST_NODE_DATA_PATH/safenode.log
- name: Check the bootstrap node is warned about using default genesis
run: |
git log -1 --oneline
ls -la $BOOTSTRAP_NODE_DATA_PATH
cat $BOOTSTRAP_NODE_DATA_PATH/safenode.log
- name: Check the network nodes are warned about using default genesis
run: |
git log -1 --oneline
ls -la $NODE_DATA_PATH
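# -u (--unrestricted) makes rg also search files that would otherwise be ignored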
rg "USING DEFAULT" "$NODE_DATA_PATH" -u
shell: bash
# Uploading the same file with a different client should not incur any payment, nor re-upload anything.
# Note: rg exits with an error if it fails to find a matching pattern, which fails the step directly.
- name: Start a different client to upload the same file
run: |
pwd
mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first
ls -l $SAFE_DATA_PATH
ls -l $SAFE_DATA_PATH/client_first
mkdir $SAFE_DATA_PATH/client
ls -l $SAFE_DATA_PATH
mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs
ls -l $CLIENT_DATA_PATH
cp ./the-test-data.zip ./the-test-data_1.zip
./target/release/faucet --log-output-dest=data-dir send 5000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) > initial_balance_from_faucet_1.txt
cat initial_balance_from_faucet_1.txt
cat initial_balance_from_faucet_1.txt | tail -n 1 > transfer_hex
cat transfer_hex
./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
./target/release/safe --log-output-dest=data-dir files upload "./the-test-data_1.zip" --retry-strategy quick -p > second_upload.txt
cat second_upload.txt
rg "New wallet balance: 5000000.000000000" second_upload.txt -c --stats
env:
SN_LOG: "all"
timeout-minutes: 25
- name: Stop the restart node
run: kill $( cat $RESTART_TEST_NODE_DATA_PATH/safenode.pid )
- name: Start the restart node again
run: |
./target/release/safenode \
--root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restarted &
sleep 10
env:
SN_LOG: "all"
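# rg exits non-zero when nothing matches, so this step fails if the restarted
# node did not reload any existing records from disk.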
- name: Assert we've reloaded some chunks
run: rg "Existing record loaded" $RESTART_TEST_NODE_DATA_PATH
- name: Chunk data integrity during node churn
run: cargo test --release -p sn_node --test data_with_churn -- --nocapture
env:
TEST_DURATION_MINS: 5
TEST_TOTAL_CHURN_CYCLES: 15
SN_LOG: "all"
timeout-minutes: 30
- name: Check current files
run: ls -la
- name: Check safenode file
run: ls /home/runner/work/safe_network/safe_network/target/release
- name: Check there were no restart issues
run: |
if rg 'Failed to execute hard-restart command' $NODE_DATA_PATH; then
echo "Restart issues detected"
exit 1
else
echo "No restart issues detected"
fi
- name: Verify the routing tables of the nodes
run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture
env:
SLEEP_BEFORE_VERIFICATION: 300
timeout-minutes: 10
- name: Verify restart of nodes using rg
shell: bash
timeout-minutes: 1
# get the counts, then the specific line, and then the digit count only
# then check we have an expected level of restarts
# TODO: make this use an env var, or relate to testnet size
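# The `--stats` flag makes rg append a summary that includes a line such as
# "42 matches"; the follow-up rg calls isolate that line and keep the digits.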
run: |
restart_count=$(rg "Node is restarting in" $NODE_DATA_PATH -c --stats | \
rg "(\d+) matches" | rg "\d+" -o)
echo "Restart $restart_count nodes"
peer_removed=$(rg "PeerRemovedFromRoutingTable" $NODE_DATA_PATH -c --stats | \
rg "(\d+) matches" | rg "\d+" -o)
echo "PeerRemovedFromRoutingTable $peer_removed times"
if [ $peer_removed -lt $restart_count ]; then
echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count"
exit 1
fi
node_count=$(ls $NODE_DATA_PATH | wc -l)
echo "Node dir count is $node_count"
# TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here
# if [ $restart_count -lt $node_count ]; then
# echo "Restart count of: $restart_count is less than the node count of: $node_count"
# exit 1
# fi
- name: Verify data replication using rg
shell: bash
timeout-minutes: 1
# get the counts, then the specific line, and then the digit count only
# then check we have an expected level of replication
# TODO: make this use an env var, or relate to testnet size
# As the bootstrap_node uses a separate folder for logging,
# the folder passed to rg needs to cover that as well.
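# These counts are informational; the step only fails if rg finds no matches at all.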
run: |
sending_list_count=$(rg "Sending a replication list" $NODE_DATA_PATH -c --stats | \
rg "(\d+) matches" | rg "\d+" -o)
echo "Sent $sending_list_count replication lists"
received_list_count=$(rg "Received replication list from" $NODE_DATA_PATH -c --stats | \
rg "(\d+) matches" | rg "\d+" -o)
echo "Received $received_list_count replication lists"
fetching_attempt_count=$(rg "FetchingKeysForReplication" $NODE_DATA_PATH -c --stats | \
rg "(\d+) matches" | rg "\d+" -o)
echo "Carried out $fetching_attempt_count fetching attempts"
if: always()
- name: Start a client to download files
run: |
./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick
ls -l $CLIENT_DATA_PATH/safe_files
downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l)
if [ $downloaded_files -lt 1 ]; then
echo "Only downloaded $downloaded_files files, less than the 1 file uploaded"
exit 1
fi
env:
SN_LOG: "all"
timeout-minutes: 10
# Download the same files again to ensure files won't get corrupted.
- name: Start a client to download the same files again
run: |
./target/release/safe --log-output-dest=data-dir files download --show-holders --retry-strategy quick
ls -l $CLIENT_DATA_PATH/safe_files
downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l)
if [ $downloaded_files -lt 1 ]; then
echo "Only downloaded $downloaded_files files, less than the 1 file uploaded"
exit 1
fi
file_size1=$(stat -c "%s" ./the-test-data_1.zip)
file_size2=$(stat -c "%s" $CLIENT_DATA_PATH/safe_files/the-test-data_1.zip)
if [ $file_size1 != $file_size2 ]; then
echo "The downloaded file has a different size $file_size2 to the original $file_size1."
exit 1
fi
env:
SN_LOG: "all"
timeout-minutes: 10
- name: Audit from genesis to collect entire spend DAG and dump to a dot file
run: |
./target/release/safe --log-output-dest=data-dir wallet audit --dot --sk-str 49113d2083f57a976076adbe85decb75115820de1e6e74b47e0429338cef124a > spend_dag_and_statistics.txt
echo "=============================================================================="
cat spend_dag_and_statistics.txt
env:
SN_LOG: "all"
timeout-minutes: 5
if: always()
- name: Ensure discord_ids are decrypted
run: |
rg 'node_' ./spend_dag_and_statistics.txt -o
timeout-minutes: 1
if: always()
- name: Check nodes running
shell: bash
timeout-minutes: 1
continue-on-error: true
run: pgrep safenode | wc -l
if: always()
- name: Wait before verifying reward forwarding
run: sleep 300
- name: Stop the local network and upload logs
if: always()
uses: maidsafe/sn-local-testnet-action@main
with:
action: stop
log_file_prefix: safe_test_logs_memcheck
platform: ubuntu-latest
build: true
- name: Check node memory usage
shell: bash
# The resource file and churning chunk_size we upload may change, and with them mem consumption.
# This is set to a value high enough to allow for some variation depending on
# resources and node location in the network, but hopefully low enough to catch
# any wild memory issues
# Any changes to this value should be carefully considered and tested!
# As we have a bootstrap node acting as an access point for churning nodes and the client,
# the memory usage here will be significantly higher than in the benchmark test,
# where we don't have a bootstrap node.
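# The parsing below assumes resource metrics are logged as fragments like
# "memory_used_mb":123 (hypothetical example); every value is extracted,
# sorted numerically, and the maximum is kept as the peak.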
run: |
node_peak_mem_limit_mb="300" # mb
peak_mem_usage=$(
rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename |
awk -F':' '/"memory_used_mb":/{print $2}' |
sort -n |
tail -n 1
)
echo "Node memory usage: $peak_mem_usage MB"
if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then
echo "Node memory usage exceeded threshold: $peak_mem_usage MB"
exit 1
fi
if: always()
- name: Check client memory usage
shell: bash
# Limits here are lower than in the benchmark tests as there is less going on.
run: |
client_peak_mem_limit_mb="1024" # mb
client_avg_mem_limit_mb="512" # mb
peak_mem_usage=$(
rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
awk -F':' '/"memory_used_mb":/{print $2}' |
sort -n |
tail -n 1
)
echo "Peak memory usage: $peak_mem_usage MB"
if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then
echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB"
exit 1
fi
total_mem=$(
rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}'
)
num_of_times=$(
rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats |
rg "(\d+) matches" |
rg "\d+" -o
)
echo "num_of_times: $num_of_times"
echo "Total memory is: $total_mem"
average_mem=$(($total_mem/$(($num_of_times))))
echo "Average memory is: $average_mem"
if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then
echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB"
exit 1
fi
- name: Check node swarm_driver handling statistics
shell: bash
# With the latest improvements, swarm_driver should rarely, if ever, have
# any very long handlings (longer than 1s).
# As the `rg` cmd fails the shell directly when it finds no entry,
# we don't cover that case here.
# If you do need to look for handlings longer than a second, it shall be:
# rg "SwarmCmd handled in [^m,µ,n]*s:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats
run: |
num_of_times=$(
rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats |
rg "(\d+) matches" |
rg "\d+" -o
)
echo "Number of long cmd handling times: $num_of_times"
total_long_handling_ms=$(
rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename |
awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
)
echo "Total cmd long handling time is: $total_long_handling_ms ms"
average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
echo "Average cmd long handling time is: $average_handling_ms ms"
total_long_handling=$(($total_long_handling_ms))
total_num_of_times=$(($num_of_times))
num_of_times=$(
rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats |
rg "(\d+) matches" |
rg "\d+" -o
)
echo "Number of long event handling times: $num_of_times"
total_long_handling_ms=$(
rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename |
awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
)
echo "Total event long handling time is: $total_long_handling_ms ms"
average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
echo "Average event long handling time is: $average_handling_ms ms"
total_long_handling=$(($total_long_handling_ms+$total_long_handling))
total_num_of_times=$(($num_of_times+$total_num_of_times))
average_handling_ms=$(($total_long_handling/$(($total_num_of_times))))
echo "Total swarm_driver long handling times is: $total_num_of_times"
echo "Total swarm_driver long handling duration is: $total_long_handling ms"
echo "Total average swarm_driver long handling duration is: $average_handling_ms ms"
- name: Verify reward forwarding using rg
shell: bash
timeout-minutes: 1
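# Same `rg --stats` match-count parsing as the checks above, then assert that
# at least min_reward_forwarding_times forwarding events were logged.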
run: |
min_reward_forwarding_times="100"
reward_forwarding_count=$(rg "Reward forwarding completed sending spend" $NODE_DATA_PATH -c --stats | \
rg "(\d+) matches" | rg "\d+" -o)
echo "Carried out $reward_forwarding_count reward forwardings"
if (( $(echo "$reward_forwarding_count < $min_reward_forwarding_times" | bc -l) )); then
echo "Reward forwarding times below the threshold: $min_reward_forwarding_times"
exit 1
fi
if: always()
- name: Upload payment wallet initialization log
uses: actions/upload-artifact@main
with:
name: payment_wallet_initialization_log
path: initial_balance_from_faucet.txt
continue-on-error: true
if: always()
- name: Move faucet log to the working folder
run: |
echo "current folder is:"
pwd
echo "SAFE_DATA_PATH has: "
ls -l $SAFE_DATA_PATH
echo "test_faucet foder has: "
ls -l $SAFE_DATA_PATH/test_faucet
echo "logs folder has: "
ls -l $SAFE_DATA_PATH/test_faucet/logs
mv $FAUCET_LOG_PATH/*.log ./faucet_log.log
env:
SN_LOG: "all"
continue-on-error: true
if: always()
timeout-minutes: 1
- name: Move bootstrap_node log to the working directory
run: |
ls -l $BOOTSTRAP_NODE_DATA_PATH
mv $BOOTSTRAP_NODE_DATA_PATH/safenode.log ./bootstrap_node.log
continue-on-error: true
if: always()
timeout-minutes: 1
- name: Upload faucet log
uses: actions/upload-artifact@main
with:
name: memory_check_faucet_log
path: faucet_log.log
continue-on-error: true
if: always()
- name: Upload bootstrap_node log
uses: actions/upload-artifact@main
with:
name: memory_check_bootstrap_node_log
path: bootstrap_node.log
continue-on-error: true
if: always()
- name: Upload spend DAG and statistics
uses: actions/upload-artifact@main
with:
name: memory_check_spend_dag_and_statistics
path: spend_dag_and_statistics.txt
continue-on-error: true
if: always()