
Commit

fix
zhenglongjiepheonix committed Jul 13, 2024
1 parent 8ec6727 commit 5095f1e
Showing 2 changed files with 4 additions and 4 deletions.
4 changes: 2 additions & 2 deletions optimum/fx/parallelization/core.py
@@ -117,7 +117,7 @@ class ParallelExecutionCtx:
     - example_inputs (`List[Any]`):
         A list of tensors which are used as example inputs for graphs captured by dynamo.
-    - parallel_layer_cache (`Dict[int, nn.Module]`):
+    - parallel_layer_cache (`Dict[str, nn.Module]`):
         Cache which maps layers(`nn.Linear`, `nn.Embedding`) to their parallel counterparts.
         Note that we will build the cache in the first compilation process, and for recompilations
         later on, we will directly replace the modules with their parallel counterparts in the cache,
@@ -135,7 +135,7 @@ class ParallelExecutionCtx:
     tp_group: dist.ProcessGroup
     current_device: torch.device
     example_inputs: List[Any] = field(default_factory=list)
-    parallel_layer_cache: Dict[int, nn.Module] = field(default_factory=dict)
+    parallel_layer_cache: Dict[str, nn.Module] = field(default_factory=dict)
     weight_map: Dict[str, str] = field(default_factory=dict)
     compile_times: int = 0
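The key type of `parallel_layer_cache` changes from `int` to `str` because, in the companion change to passes.py below, layers are now cached under their qualified submodule name (`node.target`) rather than under `id(mod)`. The commit message does not spell out the motivation, but a plausible reading is that an `id()` is tied to one particular module object, so a layer that gets rebuilt between compilations shows up with a new id and misses the cache, while its qualified name stays stable across recompilations. A minimal, self-contained sketch of that difference; the toy `nn.Sequential` model and the stand-in "parallel" layers below are illustrative only and not taken from the repository:

from typing import Dict

import torch.nn as nn

cache_by_id: Dict[int, nn.Module] = {}
cache_by_name: Dict[str, nn.Module] = {}

model = nn.Sequential(nn.Linear(4, 4))
target = "0"  # qualified name of the submodule, playing the role of node.target

original = model.get_submodule(target)
cache_by_id[id(original)] = nn.Linear(4, 4)   # stand-in for the parallel counterpart
cache_by_name[target] = nn.Linear(4, 4)

# Simulate a recompilation that rebuilds the submodule.
model[0] = nn.Linear(4, 4)
rebuilt = model.get_submodule(target)

print(id(rebuilt) in cache_by_id)   # False: the rebuilt module is a fresh object with a fresh id
print(target in cache_by_name)      # True: the qualified name is unchanged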
4 changes: 2 additions & 2 deletions optimum/fx/parallelization/passes.py
@@ -388,7 +388,7 @@ def handle_linear(node: Node, ctx: ParallelExecutionCtx) -> None:
         field = node.target

     mod: nn.Linear = graph_module.get_submodule(node.target)
-    key, layer_cache = id(mod), ctx.parallel_layer_cache
+    key, layer_cache = node.target, ctx.parallel_layer_cache
     if key in layer_cache:
         new_mod = layer_cache[key]
     else:
@@ -422,7 +422,7 @@ def handle_embedding(node: Node, ctx: ParallelExecutionCtx) -> None:
         field = node.target

     mod: nn.Embedding = graph_module.get_submodule(node.target)
-    key, layer_cache = id(mod), ctx.parallel_layer_cache
+    key, layer_cache = node.target, ctx.parallel_layer_cache
     if key in layer_cache:
         new_mod = layer_cache[key]
     else:
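For context, both `handle_linear` and `handle_embedding` follow the same look-up-or-build pattern on `parallel_layer_cache`, now keyed by the node's target string instead of the module's id. A simplified, runnable sketch of that pattern, in which a plain `nn.Linear` stands in for the real parallelized layer and the qualified name "model.layers.0.q_proj" is hypothetical:

from typing import Dict

import torch.nn as nn

def get_or_build_parallel(target: str, mod: nn.Linear, cache: Dict[str, nn.Module]) -> nn.Module:
    # Cache hit: the same qualified name always resolves to the same parallel layer,
    # even if `mod` itself was rebuilt between compilations.
    if target in cache:
        return cache[target]
    # Cache miss: build the counterpart once and remember it under the name.
    # A plain nn.Linear stands in here for the real parallelized layer.
    new_mod = nn.Linear(mod.in_features, mod.out_features, bias=mod.bias is not None)
    cache[target] = new_mod
    return new_mod

# Usage: two lookups under the same name share one cached module, even with different module objects.
cache: Dict[str, nn.Module] = {}
first = get_or_build_parallel("model.layers.0.q_proj", nn.Linear(8, 8), cache)
second = get_or_build_parallel("model.layers.0.q_proj", nn.Linear(8, 8), cache)
assert first is second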
