
Commit

test
camenduru authored Oct 13, 2023
1 parent 6d69b6e commit 1e45d50
Showing 1 changed file with 28 additions and 9 deletions.
37 changes: 28 additions & 9 deletions LLaVA_13b_8bit_colab.ipynb
@@ -73,6 +73,34 @@
"!python3 -m llava.serve.gradio_web_server --controller http://localhost:10000 --model-list-mode reload --share"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from transformers import AutoTokenizer\n",
"from llava.model import LlavaLlamaForCausalLM\n",
"import torch\n",
"\n",
"model_path = \"4bit/llava-v1.5-13b-4GB-8bit\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n",
"model = LlavaLlamaForCausalLM.from_pretrained(model_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vision_tower = model.get_vision_tower()\n",
"if not vision_tower.is_loaded:\n",
" vision_tower.load_model()\n",
"vision_tower.to(device='cpu')\n",
"image_processor = vision_tower.image_processor"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -89,15 +117,6 @@
"# load_4bit=False\n",
"# )\n",
"\n",
"from transformers import AutoTokenizer\n",
"from llava.model import LlavaLlamaForCausalLM\n",
"import torch\n",
"\n",
"model_path = \"4bit/llava-v1.5-13b-4GB-8bit\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n",
"model = LlavaLlamaForCausalLM.from_pretrained(model_path)\n",
"# model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, load_in_8bit=True, device_map=\"auto\")\n",
"\n",
"vision_tower = model.get_vision_tower()\n",
"if not vision_tower.is_loaded:\n",
" vision_tower.load_model()\n",
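
For context, the cells added in this commit only load the checkpoint: they produce a tokenizer, the LlavaLlamaForCausalLM model, and the CLIP image_processor taken from the vision tower. Below is a minimal sketch of how these objects are typically combined for a single caption. It is not part of this commit; the conversation template name (llava_v1), the helpers from llava.constants, llava.conversation, and llava.mm_utils, and the image path are assumptions based on the LLaVA repository, not on this notebook.

# Sketch only (not in this commit): one caption using the objects loaded above.
# Assumes the llava_v1 template and helper functions from the LLaVA repo;
# "example.jpg" and the question are illustrative.
import torch
from PIL import Image

from llava.constants import DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX
from llava.conversation import conv_templates
from llava.mm_utils import tokenizer_image_token

# Build a single-turn prompt that contains the <image> placeholder.
conv = conv_templates["llava_v1"].copy()
conv.append_message(conv.roles[0], DEFAULT_IMAGE_TOKEN + "\nDescribe this image.")
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()

# Preprocess the image with the CLIP image processor exposed by the vision tower.
image = Image.open("example.jpg").convert("RGB")
image_tensor = image_processor.preprocess(image, return_tensors="pt")["pixel_values"]

# Tokenize, replacing the <image> placeholder with the special IMAGE_TOKEN_INDEX id.
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0)

# Generate on whatever device/dtype the model was loaded with (CPU in the cells above),
# then decode only the newly generated tokens.
with torch.inference_mode():
    output_ids = model.generate(
        input_ids,
        images=image_tensor.to(model.dtype),
        max_new_tokens=128,
    )
print(tokenizer.decode(output_ids[0, input_ids.shape[1]:], skip_special_tokens=True))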
