
Commit

#9510: added reproduction and minimal compute code that repros the deterministic reconfig dataformat hang
caixunshiren committed Jun 21, 2024
1 parent 0c29a79 commit 191b108
Showing 3 changed files with 92 additions and 90 deletions.
@@ -195,7 +195,7 @@ def get_chunk_size(s):
     # start_idx = 32

     # while start_idx < s:
-    for start_idx in [32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768]:
+    for start_idx in [64]: # [4096, 8192, 16384, 32768]:
         scale = d**-0.5

         k_chunk_size = get_chunk_size(start_idx)
@@ -317,18 +317,18 @@ def get_chunk_size(s):
 @pytest.mark.parametrize(
     "dtype, q_dtype, mask_dtype",
     [
-        [tt_lib.tensor.DataType.BFLOAT8_B, tt_lib.tensor.DataType.BFLOAT8_B, tt_lib.tensor.DataType.BFLOAT8_B],
+        # [tt_lib.tensor.DataType.BFLOAT8_B, tt_lib.tensor.DataType.BFLOAT8_B, tt_lib.tensor.DataType.BFLOAT8_B],
         [tt_lib.tensor.DataType.BFLOAT16, tt_lib.tensor.DataType.BFLOAT16, tt_lib.tensor.DataType.BFLOAT16],
-        [tt_lib.tensor.DataType.BFLOAT8_B, tt_lib.tensor.DataType.BFLOAT16, tt_lib.tensor.DataType.BFLOAT8_B],
-        [tt_lib.tensor.DataType.BFLOAT8_B, tt_lib.tensor.DataType.BFLOAT16, tt_lib.tensor.DataType.BFLOAT4_B],
-        [tt_lib.tensor.DataType.BFLOAT4_B, tt_lib.tensor.DataType.BFLOAT16, tt_lib.tensor.DataType.BFLOAT4_B],
+        # [tt_lib.tensor.DataType.BFLOAT8_B, tt_lib.tensor.DataType.BFLOAT16, tt_lib.tensor.DataType.BFLOAT8_B],
+        # [tt_lib.tensor.DataType.BFLOAT8_B, tt_lib.tensor.DataType.BFLOAT16, tt_lib.tensor.DataType.BFLOAT4_B],
+        # [tt_lib.tensor.DataType.BFLOAT4_B, tt_lib.tensor.DataType.BFLOAT16, tt_lib.tensor.DataType.BFLOAT4_B],
     ],
     ids=[
-        "all_bfp8",
+        # "all_bfp8",
         "all_bfp16",
-        "kvmask_bfp8",
-        "kv_bfp8_mask_bfp4",
-        "kvmask_bfp4",
+        # "kvmask_bfp8",
+        # "kv_bfp8_mask_bfp4",
+        # "kvmask_bfp4",
     ],
 )
 @pytest.mark.parametrize(
@@ -338,7 +338,9 @@ def get_chunk_size(s):
         # [1, 8, 1, 32768, 128], # Llama2-70B
         # [8, 8, 1, 32768, 128, (8,4)], # Llama2-70B
         # [12, 8, 1, 32768, 128], # Llama2-70B
-        [16, 8, 1, 32768, 128, (8, 6)], # Llama2-70B
+        [16, 8, 1, 32768, 128, (8, 4)], # Llama2-70B
+        # [8, 8, 1, 32768, 128, (8, 6)], # Llama2-70B
+        # [4, 8, 1, 32768, 128, (8, 6)], # Llama2-70B
         # [16, 8, 1, 32768, 128, (8,6)], # Llama2-70B
         # [16, 8, 1, 32768, 128, (8,7)], # Llama2-70B
         # [16, 8, 1, 32768, 128, (8,8)], # Llama2-70B
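
Net effect of the three hunks: the sweep is narrowed to a single repro configuration, namely start_idx 64, all-BFLOAT16 dtypes under the all_bfp16 id, and the [16, 8, 1, 32768, 128, (8, 4)] Llama2-70B shape. The stub below is a minimal sketch of that surviving parametrization; the shape parameter names (b, nh, nkv, s, d, grid_size) and the test function name are assumptions made here, and the actual tt_lib device calls are omitted because they need Tenstorrent hardware.

import pytest

# Hypothetical standalone stub mirroring the narrowed repro configuration above.
# The real test in this commit drives tt_lib on a Tenstorrent device; that part
# is intentionally left out here.
REPRO_DTYPES = ("BFLOAT16", "BFLOAT16", "BFLOAT16")  # dtype, q_dtype, mask_dtype (all_bfp16)
REPRO_SHAPE = (16, 8, 1, 32768, 128, (8, 4))  # assumed order: b, nh, nkv, s, d, grid_size


@pytest.mark.parametrize("dtype, q_dtype, mask_dtype", [REPRO_DTYPES], ids=["all_bfp16"])
@pytest.mark.parametrize("b, nh, nkv, s, d, grid_size", [REPRO_SHAPE])
def test_sdpa_decode_reconfig_repro(dtype, q_dtype, mask_dtype, b, nh, nkv, s, d, grid_size):
    # Only start_idx == 64 survives the narrowed loop from the first hunk.
    for start_idx in [64]:
        scale = d**-0.5  # same scaling as in the diff context
        # Placeholder check; the real test builds KV caches and runs SDPA decode on device.
        assert scale == 128**-0.5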
