Re-add examples by default
Cyrilvallez committed Jan 8, 2025
1 parent 03d804c commit 54e9d2d
Showing 5 changed files with 8 additions and 7 deletions.
2 changes: 1 addition & 1 deletion examples/modular-transformers/modeling_dummy.py
@@ -597,7 +597,7 @@ def _update_causal_mask(
         output_attentions: bool,
     ):
         if self.config._attn_implementation == "flash_attention_2":
-            if attention_mask is not None and 0.0 in attention_mask:
+            if attention_mask is not None and (attention_mask == 0.0).any():
                 return attention_mask
             return None
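
For reference, a minimal sketch of what the two checks do on a PyTorch tensor; the mask values below are invented for illustration, and the commit itself does not state the reason for switching forms:

import torch

# Hypothetical padding mask: 1.0 = attend, 0.0 = padded (values invented for illustration).
attention_mask = torch.tensor([[1.0, 1.0, 0.0]])

# Old form: membership test via Tensor.__contains__.
print(0.0 in attention_mask)              # True

# New form: explicit element-wise comparison followed by a reduction over all elements.
print((attention_mask == 0.0).any())      # tensor(True)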

2 changes: 1 addition & 1 deletion examples/modular-transformers/modeling_multimodal1.py
@@ -597,7 +597,7 @@ def _update_causal_mask(
         output_attentions: bool,
     ):
         if self.config._attn_implementation == "flash_attention_2":
-            if attention_mask is not None and 0.0 in attention_mask:
+            if attention_mask is not None and (attention_mask == 0.0).any():
                 return attention_mask
             return None

2 changes: 1 addition & 1 deletion examples/modular-transformers/modeling_my_new_model2.py
@@ -602,7 +602,7 @@ def _update_causal_mask(
         output_attentions: bool,
     ):
         if self.config._attn_implementation == "flash_attention_2":
-            if attention_mask is not None and 0.0 in attention_mask:
+            if attention_mask is not None and (attention_mask == 0.0).any():
                 return attention_mask
             return None

2 changes: 1 addition & 1 deletion examples/modular-transformers/modeling_super.py
@@ -519,7 +519,7 @@ def _update_causal_mask(
         output_attentions: bool,
     ):
         if self.config._attn_implementation == "flash_attention_2":
-            if attention_mask is not None and 0.0 in attention_mask:
+            if attention_mask is not None and (attention_mask == 0.0).any():
                 return attention_mask
             return None

7 changes: 4 additions & 3 deletions utils/create_dependency_mapping.py
@@ -7,10 +7,11 @@ def topological_sort(dependencies):
     new_dependencies = {}
     graph = defaultdict(list)
     for node, deps in dependencies.items():
+        node_name = node.rsplit("modular_", 1)[1].replace(".py", "")
         for dep in deps:
-            if "example" not in node and "auto" not in dep:
-                graph[dep.split(".")[-2]].append(node.split("/")[-2])
-        new_dependencies[node.split("/")[-2]] = node
+            if "auto" not in dep:
+                graph[dep.split(".")[-2]].append(node_name)
+        new_dependencies[node_name] = node

     # Create a graph and in-degree count for each node
     def filter_one_by_one(filtered_list, reverse):
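
For reference, a small sketch of how the new key derivation differs from the old one; the path below is invented for illustration but follows the naming pattern of the example files above:

# Hypothetical modular file path, matching the pattern of the example files above.
node = "examples/modular-transformers/modular_dummy.py"

# Old key: the parent directory name, which is identical for every example file,
# hence the previous `"example" not in node` filter.
old_key = node.split("/")[-2]                               # "modular-transformers"

# New key: derived from the file name itself, so each example gets a unique key,
# which is presumably what allows examples to be included by default again.
new_key = node.rsplit("modular_", 1)[1].replace(".py", "")  # "dummy"

print(old_key, new_key)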
