Showing 12 changed files with 251 additions and 62 deletions.
@@ -0,0 +1,30 @@
base:
    seed: &seed 42
model:
    type: Llama
    path: model path
    torch_dtype: auto
calib:
    name: pileval
    download: False
    path: calib data path
    n_samples: 128
    bs: -1
    seq_len: 512
    preproc: general
    seed: *seed
eval:
    eval_pos: [transformed]
    name: [wikitext2, c4]
    download: False
    path: eval data path
    seq_len: 2048
sparse:
    method: ShortGPT
    weight:
        n_prune_layers: 9
save:
    save_trans: True
    save_fp: False
    save_lightllm: False
    save_path: ./save
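The new config follows the existing llmc YAML layout: the sparse section selects the ShortGPT method and sets a pruning budget of 9 transformer blocks via weight.n_prune_layers. As a minimal sketch (not the project's actual entry point), a config like this can be parsed with standard PyYAML and the budget read back; "shortgpt.yml" is a hypothetical file name used only for illustration:

# Minimal sketch: parse the config above with PyYAML and read the pruning budget.
# "shortgpt.yml" is a hypothetical file name; the real llmc runner loads configs itself.
import yaml

with open("shortgpt.yml", "r") as f:
    cfg = yaml.safe_load(f)

print(cfg["sparse"]["method"])                    # ShortGPT
print(cfg["sparse"]["weight"]["n_prune_layers"])  # 9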
@@ -0,0 +1,113 @@
import gc
import json
from typing import List, Optional

import numpy as np
import torch
import torch.nn as nn
from loguru import logger
from transformers.models.llama.modeling_llama import LlamaRMSNorm
from transformers.models.mistral.modeling_mistral import MistralRMSNorm

from llmc.utils import copy_files
from llmc.utils.registry_factory import ALGO_REGISTRY

from .base_blockwise_sparsification import BaseBlockwiseSparsification


@ALGO_REGISTRY
class ShortGPT(BaseBlockwiseSparsification):
    def __init__(self, model, sparsity_config, input, config):
        super().__init__(model, sparsity_config, input, config)

    def block_opt(self, block):
        block = block.cuda()

        output_feat = self.block_forward(block)
        torch.cuda.empty_cache()
        self.block_transform(self.input["data"], output_feat)
        self.input["data"] = output_feat

    def block_transform(self, input_feat, output_feat):
        logger.info(f"Start transform the {self.block_idx + 1}-th block")
        self.subset_transform(input_feat, output_feat)

    @torch.no_grad()
    def compute_bi(self, input_feat: torch.Tensor, output_feat: torch.Tensor):
        # Block Influence (BI): 1 - cosine similarity between the block's
        # input and output hidden states, computed per token.
        _, _, d = input_feat.shape
        input_feat = input_feat.reshape(-1, d)
        output_feat = output_feat.reshape(-1, d)

        norm_input = input_feat.norm(dim=-1, keepdim=True)
        norm_output = output_feat.norm(dim=-1, keepdim=True)

        sim = (input_feat @ output_feat.T) / (norm_input * norm_output)
        sim = sim.diagonal().nan_to_num(nan=0.5)

        return 1 - sim

    @torch.no_grad()
    def subset_transform(self, input_feat, output_feat):
        # calculate the BI score of the current block
        if self.sparser.importances is None:
            self.sparser.importances = np.zeros(len(self.blocks))
        self.sparser.importances[self.block_idx] = (
            self.compute_bi(input_feat[0], output_feat[0]).sum().cpu().item()
        )

    @torch.no_grad()
    def remove_layers(self, layers_to_remove: Optional[List[int]] = None):
        # If no explicit list is given, drop the n_prune_layers blocks with
        # the lowest BI scores, i.e. the blocks that change their input least.
        if not layers_to_remove and self.sparser.n_prune_layers:
            layers_to_remove = np.argsort(
                np.array(self.sparser.importances)
            )[: self.sparser.n_prune_layers].tolist()

        for idx in sorted(layers_to_remove or [], reverse=True):
            try:
                del self.blocks[idx]
            except IndexError:
                logger.info(f"layer {idx} does not exist")
        return layers_to_remove

    @torch.no_grad()
    def deploy(self, deploy_format):
        logger.info(f"After compute, BI scores are {self.sparser.importances}")

        logger.info("-- deploy_sparsity_model start --")
        logger.info(f"sparsity_config : {self.sparsity_config}")

        logger.info("-- begin remove layers --")
        layers_to_remove = self.remove_layers()
        logger.info(f"remove layers: {layers_to_remove}")

        logger.info("-- deploy_sparsity_model done --")

    @torch.no_grad()
    def save_model(self, path):
        if self.config.model.type == "Llava":
            self.model.llava_model.language_model = self.model.get_model()
            self.model.llava_model.save_pretrained(path)
            logger.info("save model done --")
            self.copy_tokenizer(path)
            copy_files(self.config.model.path, path, "preprocessor_config")
        else:
            self.model.get_model().save_pretrained(path)
            config_file = path + '/config.json'

            logger.info("save model done --")
            self.copy_tokenizer(path)
            # Keep config.json consistent with the pruned block count.
            with open(config_file, 'r') as file:
                config_new = json.load(file)
            config_new['num_hidden_layers'] = len(self.blocks)
            with open(config_file, 'w') as file:
                json.dump(config_new, file, indent=4)
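The core of compute_bi above is the Block Influence (BI) metric from ShortGPT: one minus the per-token cosine similarity between a block's input and output hidden states, summed over all tokens. Blocks with low BI barely transform their input and are removed first. A standalone sketch of the same quantity on dummy tensors follows; the tensor shapes and helper name are illustrative, not part of the committed code:

# Standalone sketch of the Block Influence (BI) score computed by compute_bi.
# Hidden states are assumed to have shape (batch, seq_len, hidden_dim).
import torch
import torch.nn.functional as F

def block_influence(x_in, x_out):
    d = x_in.shape[-1]
    x_in = x_in.reshape(-1, d)
    x_out = x_out.reshape(-1, d)
    # Per-token cosine similarity between the block's input and output.
    cos = F.cosine_similarity(x_in, x_out, dim=-1)
    return 1.0 - cos.nan_to_num(nan=0.5)

x_in = torch.randn(2, 16, 4096)
x_out = x_in + 0.01 * torch.randn_like(x_in)       # a block that barely changes its input
print(block_influence(x_in, x_out).sum().item())   # small total BI -> strong pruning candidate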
@@ -1,5 +1,9 @@
 class Sparser:
-    def __init__(self, sparsity, **kwargs):
-        self.sparsity = sparsity
-        self.kwargs = kwargs
-        self.W_mask = None
+    def __init__(self, sparsity_constraint, **kwargs):
+        if 'sparsity' in sparsity_constraint:
+            self.sparsity = sparsity_constraint["sparsity"]
+            self.W_mask = None
+        elif 'n_prune_layers' in sparsity_constraint:
+            self.n_prune_layers = sparsity_constraint["n_prune_layers"]
+            self.importances = None
+        self.kwargs = kwargs
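With this change, Sparser accepts either a weight-sparsity constraint or a layer-pruning budget, depending on which key appears in the constraint dict. A hedged usage sketch, keeping in mind that call sites inside llmc may construct it differently:

# Illustrative only: the two constraint shapes the updated Sparser understands.
ratio_style = Sparser({"sparsity": 0.5})         # sets self.sparsity and self.W_mask
shortgpt_style = Sparser({"n_prune_layers": 9})  # sets self.n_prune_layers and self.importances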