This repository has been archived by the owner on Oct 11, 2024. It is now read-only.

Commit

remove useless comments
LucasWilkinson committed Feb 14, 2024
1 parent: ae57f2c · commit: fb95394
Showing 2 changed files with 1 addition and 5 deletions.
vllm/model_executor/layers/linear.py (4 changes: 0 additions & 4 deletions)
@@ -206,7 +206,6 @@ def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
         assert param_data.shape == loaded_weight.shape
         param_data.copy_(loaded_weight)

-        # If LazyCompressedParameter, compress the data.
         if isinstance(param, LazyCompressedParameter):
             param.compress()

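For context, here is a minimal sketch of the load-then-compress flow this hunk touches. The LazyCompressedParameter name and its compress() method appear in the diff itself; the class body below (and the to_sparse() stand-in) is an assumption for illustration, not the repository's implementation.

import torch
from torch.nn import Parameter


class LazyCompressedParameter(Parameter):
    """Hypothetical stand-in: holds dense data until compress() is called."""

    def compress(self) -> None:
        # Assumed behavior: swap self.data for a compressed/sparse
        # representation, freeing the dense copy.
        self.data = self.data.to_sparse()


def load_weight(param: Parameter, loaded_weight: torch.Tensor) -> None:
    param_data = param.data
    assert param_data.shape == loaded_weight.shape
    param_data.copy_(loaded_weight)

    # Compress only after the dense weights have been copied in, as above.
    if isinstance(param, LazyCompressedParameter):
        param.compress()

Usage would be along the lines of load_weight(LazyCompressedParameter(torch.empty(4, 4)), torch.randn(4, 4)).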
@@ -463,8 +462,6 @@ def weight_loader(self,
         # all of the shards are loaded, for the QKV matrix this means
         # loading shards "q", "k" and "v"
         all_shards_loaded = (self.loaded_shards == set(["q", "k", "v"]))
-
-        # If LazyCompressedParameter, compress the data.
         if all_shards_loaded and isinstance(param, LazyCompressedParameter):
             param.compress()

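The fused QKV path deliberately defers compression: compressing after only the "q" shard had been copied would freeze a mostly-empty tensor, so the loader waits until loaded_shards covers all three. Below is a sketch of that bookkeeping, reusing the LazyCompressedParameter stand-in above; everything except the loaded_shards set and the {"q", "k", "v"} check is illustrative.

class QKVWeightLoaderSketch:
    """Illustrative only: tracks which of the fused QKV shards are loaded."""

    def __init__(self) -> None:
        self.loaded_shards: set = set()

    def load_shard(self, param: Parameter, shard_id: str,
                   shard_weight: torch.Tensor) -> None:
        # A real loader would copy shard_weight into the q/k/v slice of
        # param here; that slicing is omitted from this sketch.
        self.loaded_shards.add(shard_id)

        # Compress once, and only once, when every shard is present.
        all_shards_loaded = (self.loaded_shards == set(["q", "k", "v"]))
        if all_shards_loaded and isinstance(param, LazyCompressedParameter):
            param.compress()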
@@ -559,7 +556,6 @@ def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
         assert param_data.shape == loaded_weight.shape
         param_data.copy_(loaded_weight)

-        # If LazyCompressedParameter, compress the data.
         if isinstance(param, LazyCompressedParameter):
             param.compress()

Second changed file (1 addition & 1 deletion):
@@ -49,7 +49,7 @@ def apply_weights(
     ) -> torch.Tensor:
         w: LazyCompressedParameter = weights["weight"]

-        # if we never compressed (likely due to insufficient sparsity),
+        # if we never compressed (likely due to insufficient sparsity),
         # i.e. have uncompressed_data run normally
         if w.has_uncompressed_data:
             assert not w.has_compressed_data
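
This hunk is the consumer side of the same flag: a parameter that was never compressed (typically because the weight was not sparse enough for compression to pay off) still holds dense data and should take the ordinary path. A sketch under the same assumptions; has_uncompressed_data and has_compressed_data are the attributes named in the diff, while F.linear as the dense fallback is a guess at what "run normally" means here.

import torch
import torch.nn.functional as F


def apply_weights_sketch(w, x: torch.Tensor) -> torch.Tensor:
    # If we never compressed (likely due to insufficient sparsity),
    # fall back to the ordinary dense matmul.
    if w.has_uncompressed_data:
        assert not w.has_compressed_data
        return F.linear(x, w)
    # Otherwise a compressed/sparse kernel would run here (not sketched).
    raise NotImplementedError("compressed path not sketched")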
