Fix error with inconsistent blobsize in flaky test
Fixing this bug will probably mitigate flakiness
berland committed Nov 13, 2024
1 parent d11ba38 commit 648aa27
Showing 1 changed file with 9 additions and 4 deletions.
@@ -99,6 +99,7 @@ def test_cpu_seconds_can_detect_multiprocess():
 @pytest.mark.usefixtures("use_tmpdir")
 def test_memory_usage_counts_grandchildren():
     scriptname = "recursive_memory_hog.py"
+    blobsize = 1e7
     with open(scriptname, "w", encoding="utf-8") as script:
         script.write(
             textwrap.dedent(
@@ -109,11 +110,15 @@ def test_memory_usage_counts_grandchildren():
                 import time
                 counter = int(sys.argv[-2])
-                numbers = list(range(int(sys.argv[-1])))
+                blobsize = int(sys.argv[-1])
+                # Allocate memory
+                _blob = list(range(blobsize))
                 if counter > 0:
                     parent = os.fork()
                     if not parent:
-                        os.execv(sys.argv[-3], [sys.argv[-3], str(counter - 1), str(int(1e7))])
+                        os.execv(sys.argv[-3], [sys.argv[-3], str(counter - 1), str(blobsize)])
                 time.sleep(3)""" # Too low sleep will make the test faster but flaky
             )
         )
@@ -124,7 +129,7 @@ def max_memory_per_subprocess_layer(layers: int) -> int:
         fmstep = ForwardModelStep(
             {
                 "executable": executable,
-                "argList": [str(layers), str(int(1e6))],
+                "argList": [str(layers), str(int(blobsize))],
             },
             0,
         )
@@ -139,7 +144,7 @@ def max_memory_per_subprocess_layer(layers: int) -> int:
     # comparing the memory used with different amounts of forks done.
     # subtract a little bit (* 0.9) due to natural variance in memory used
     # when running the program.
-    memory_per_numbers_list = sys.getsizeof(int(0)) * 1e7 * 0.90
+    memory_per_numbers_list = sys.getsizeof(int(0)) * blobsize * 0.90
 
     max_seens = [max_memory_per_subprocess_layer(layers) for layers in range(3)]
     assert max_seens[0] + memory_per_numbers_list < max_seens[1]
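
Note on the updated bound: each fork layer of the helper script now allocates a list of blobsize integers, and the test requires peak memory to grow by roughly one such blob per extra layer. Below is a minimal standalone sketch of that arithmetic (not part of the commit), assuming a 64-bit CPython where sys.getsizeof(0) is typically 24-28 bytes, which puts the bound at roughly 200-250 MB per layer:

    import sys

    # Standalone sketch of the bound the test uses for how much memory one
    # extra fork layer should add; blobsize matches the value in the test.
    blobsize = int(1e7)
    # Rough lower bound: most of the blobsize int objects cost at least
    # sys.getsizeof(0) bytes each; * 0.90 leaves slack for run-to-run variance.
    memory_per_numbers_list = sys.getsizeof(int(0)) * blobsize * 0.90
    print(f"expected growth per extra layer: >= {memory_per_numbers_list / 1e6:.0f} MB")

The assertion max_seens[0] + memory_per_numbers_list < max_seens[1] then only holds if every layer keeps a blob of the same size alive, which is why the blobsize passed down through os.execv has to match the one used in the bound.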