Commit f6c66e8

ShashankMosaicML committed Dec 4, 2024
1 parent fef3a5d commit f6c66e8
Showing 1 changed file with 4 additions and 0 deletions.
llmfoundry/models/layers/attention.py (4 additions & 0 deletions)
--- a/llmfoundry/models/layers/attention.py
+++ b/llmfoundry/models/layers/attention.py
@@ -1034,6 +1034,7 @@ def __init__(
         reuse_kv_layer_idx: Optional[int] = None,
         attn_logit_softcapping: Optional[float] = None,
         kv_dim: Optional[int] = None,
+        flex_attn_mod_list: Optional[list[dict[str, Any]]] = None,
     ):
         super().__init__(
             d_model=d_model,
@@ -1055,6 +1056,7 @@ def __init__(
             reuse_kv_layer_idx=reuse_kv_layer_idx,
             attn_logit_softcapping=attn_logit_softcapping,
             kv_dim=kv_dim,
+            flex_attn_mod_list=flex_attn_mod_list,
         )


@@ -1085,6 +1087,7 @@ def __init__(
         reuse_kv_layer_idx: Optional[int] = None,
         attn_logit_softcapping: Optional[float] = None,
         kv_dim: Optional[int] = None,
+        flex_attn_mod_list: Optional[list[dict[str, Any]]] = None,
     ):
         super().__init__(
             d_model=d_model,
@@ -1106,6 +1109,7 @@ def __init__(
             reuse_kv_layer_idx=reuse_kv_layer_idx,
             attn_logit_softcapping=attn_logit_softcapping,
             kv_dim=kv_dim,
+            flex_attn_mod_list=flex_attn_mod_list,
         )


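For context, the four added lines thread one new optional keyword, flex_attn_mod_list, through two thin subclass constructors into the parent attention class's __init__. Below is a minimal, self-contained sketch of that pass-through pattern; the class names (ToyGroupedQueryAttention, ToyMultiheadAttention) and the example mod-list value are illustrative assumptions, not llmfoundry's actual class hierarchy or configuration schema.

from typing import Any, Optional


class ToyGroupedQueryAttention:
    """Stand-in for the shared parent class whose __init__ accepts the new keyword."""

    def __init__(
        self,
        d_model: int,
        kv_dim: Optional[int] = None,
        flex_attn_mod_list: Optional[list[dict[str, Any]]] = None,
    ):
        self.d_model = d_model
        self.kv_dim = kv_dim
        # Store the mod list (empty by default) for later use by the attention implementation.
        self.flex_attn_mod_list = flex_attn_mod_list or []


class ToyMultiheadAttention(ToyGroupedQueryAttention):
    """Stand-in for the thin subclasses touched by the commit: each one adds the
    keyword to its own signature and forwards it unchanged to the parent."""

    def __init__(
        self,
        d_model: int,
        kv_dim: Optional[int] = None,
        flex_attn_mod_list: Optional[list[dict[str, Any]]] = None,
    ):
        super().__init__(
            d_model=d_model,
            kv_dim=kv_dim,
            flex_attn_mod_list=flex_attn_mod_list,
        )


if __name__ == '__main__':
    # Hypothetical mod-list value; the real schema is defined elsewhere in llmfoundry.
    attn = ToyMultiheadAttention(
        d_model=512,
        flex_attn_mod_list=[{
            'mod_name': 'sliding_window_mask',
            'mod_kwargs': {'window_size': 128},
        }],
    )
    print(attn.flex_attn_mod_list)

Before this change, passing flex_attn_mod_list to either of these subclasses would raise a TypeError, since their explicit keyword signatures did not yet accept it; forwarding the keyword keeps the subclasses in sync with the parent's signature.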
