diff --git a/README.md b/README.md index 8630082..822745c 100644 --- a/README.md +++ b/README.md @@ -32,9 +32,8 @@ Contributions are encouraged! However, please **remember to remove any private i - [ ] ControlNet - [x] Multi-GPU supports -- [ ] img2img - - [ ] Image Encoder - - [ ] Upload image +- [x] img2img +- [ ] Upload image - [ ] Notebook Testing CI - [x] CPU CI - [ ] GPU CI? diff --git a/stable_diffusion_notebookui.ipynb b/stable_diffusion_notebookui.ipynb index 25ef332..c0108a6 100644 --- a/stable_diffusion_notebookui.ipynb +++ b/stable_diffusion_notebookui.ipynb @@ -14,124 +14,26 @@ }, { "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Collecting diffusers\n", - " Downloading diffusers-0.27.2-py3-none-any.whl (2.0 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m13.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: transformers in /usr/local/lib/python3.10/dist-packages (4.38.2)\n", - "Collecting transformers\n", - " Downloading transformers-4.39.3-py3-none-any.whl (8.8 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m8.8/8.8 MB\u001b[0m \u001b[31m47.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting mediapy\n", - " Downloading mediapy-1.2.0-py3-none-any.whl (25 kB)\n", - "Collecting compel\n", - " Downloading compel-2.0.2-py3-none-any.whl (30 kB)\n", - "Collecting accelerate\n", - " Downloading accelerate-0.29.2-py3-none-any.whl (297 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m297.4/297.4 kB\u001b[0m \u001b[31m26.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.10/dist-packages (from diffusers) (7.1.0)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from diffusers) (3.13.4)\n", - "Requirement already satisfied: huggingface-hub>=0.20.2 in /usr/local/lib/python3.10/dist-packages (from diffusers) (0.20.3)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from diffusers) (1.25.2)\n", - "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from diffusers) (2023.12.25)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from diffusers) (2.31.0)\n", - "Requirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.10/dist-packages (from diffusers) (0.4.2)\n", - "Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from diffusers) (9.4.0)\n", - "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers) (24.0)\n", - "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (6.0.1)\n", - "Requirement already satisfied: tokenizers<0.19,>=0.14 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.15.2)\n", - "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers) (4.66.2)\n", - "Requirement already satisfied: ipython in /usr/local/lib/python3.10/dist-packages (from mediapy) (7.34.0)\n", - "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from mediapy) (3.7.1)\n", - "Requirement already 
satisfied: pyparsing~=3.0 in /usr/local/lib/python3.10/dist-packages (from compel) (3.1.2)\n", - "Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from compel) (2.2.1+cu121)\n", - "Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from accelerate) (5.9.5)\n", - "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub>=0.20.2->diffusers) (2023.6.0)\n", - "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub>=0.20.2->diffusers) (4.11.0)\n", - "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch->compel) (1.12)\n", - "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch->compel) (3.3)\n", - "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch->compel) (3.1.3)\n", - "Collecting nvidia-cuda-nvrtc-cu12==12.1.105 (from torch->compel)\n", - " Using cached nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (23.7 MB)\n", - "Collecting nvidia-cuda-runtime-cu12==12.1.105 (from torch->compel)\n", - " Using cached nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (823 kB)\n", - "Collecting nvidia-cuda-cupti-cu12==12.1.105 (from torch->compel)\n", - " Using cached nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (14.1 MB)\n", - "Collecting nvidia-cudnn-cu12==8.9.2.26 (from torch->compel)\n", - " Using cached nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl (731.7 MB)\n", - "Collecting nvidia-cublas-cu12==12.1.3.1 (from torch->compel)\n", - " Using cached nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl (410.6 MB)\n", - "Collecting nvidia-cufft-cu12==11.0.2.54 (from torch->compel)\n", - " Using cached nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl (121.6 MB)\n", - "Collecting nvidia-curand-cu12==10.3.2.106 (from torch->compel)\n", - " Using cached nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl (56.5 MB)\n", - "Collecting nvidia-cusolver-cu12==11.4.5.107 (from torch->compel)\n", - " Using cached nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl (124.2 MB)\n", - "Collecting nvidia-cusparse-cu12==12.1.0.106 (from torch->compel)\n", - " Using cached nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl (196.0 MB)\n", - "Collecting nvidia-nccl-cu12==2.19.3 (from torch->compel)\n", - " Using cached nvidia_nccl_cu12-2.19.3-py3-none-manylinux1_x86_64.whl (166.0 MB)\n", - "Collecting nvidia-nvtx-cu12==12.1.105 (from torch->compel)\n", - " Using cached nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (99 kB)\n", - "Requirement already satisfied: triton==2.2.0 in /usr/local/lib/python3.10/dist-packages (from torch->compel) (2.2.0)\n", - "Collecting nvidia-nvjitlink-cu12 (from nvidia-cusolver-cu12==11.4.5.107->torch->compel)\n", - " Using cached nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl (21.1 MB)\n", - "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.10/dist-packages (from importlib-metadata->diffusers) (3.18.1)\n", - "Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.10/dist-packages (from ipython->mediapy) (67.7.2)\n", - "Collecting jedi>=0.16 (from ipython->mediapy)\n", - " Downloading jedi-0.19.1-py2.py3-none-any.whl (1.6 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 
MB\u001b[0m \u001b[31m73.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: decorator in /usr/local/lib/python3.10/dist-packages (from ipython->mediapy) (4.4.2)\n", - "Requirement already satisfied: pickleshare in /usr/local/lib/python3.10/dist-packages (from ipython->mediapy) (0.7.5)\n", - "Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.10/dist-packages (from ipython->mediapy) (5.7.1)\n", - "Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from ipython->mediapy) (3.0.43)\n", - "Requirement already satisfied: pygments in /usr/local/lib/python3.10/dist-packages (from ipython->mediapy) (2.16.1)\n", - "Requirement already satisfied: backcall in /usr/local/lib/python3.10/dist-packages (from ipython->mediapy) (0.2.0)\n", - "Requirement already satisfied: matplotlib-inline in /usr/local/lib/python3.10/dist-packages (from ipython->mediapy) (0.1.6)\n", - "Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.10/dist-packages (from ipython->mediapy) (4.9.0)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mediapy) (1.2.1)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mediapy) (0.12.1)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mediapy) (4.51.0)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mediapy) (1.4.5)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mediapy) (2.8.2)\n", - "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->diffusers) (3.3.2)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->diffusers) (3.6)\n", - "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->diffusers) (2.0.7)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->diffusers) (2024.2.2)\n", - "Requirement already satisfied: parso<0.9.0,>=0.8.3 in /usr/local/lib/python3.10/dist-packages (from jedi>=0.16->ipython->mediapy) (0.8.4)\n", - "Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.10/dist-packages (from pexpect>4.3->ipython->mediapy) (0.7.0)\n", - "Requirement already satisfied: wcwidth in /usr/local/lib/python3.10/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->mediapy) (0.2.13)\n", - "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib->mediapy) (1.16.0)\n", - "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch->compel) (2.1.5)\n", - "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch->compel) (1.3.0)\n", - "Installing collected packages: nvidia-nvtx-cu12, nvidia-nvjitlink-cu12, nvidia-nccl-cu12, nvidia-curand-cu12, nvidia-cufft-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, jedi, nvidia-cusparse-cu12, nvidia-cudnn-cu12, nvidia-cusolver-cu12, mediapy, diffusers, transformers, compel, accelerate\n", - 
" Attempting uninstall: transformers\n", - " Found existing installation: transformers 4.38.2\n", - " Uninstalling transformers-4.38.2:\n", - " Successfully uninstalled transformers-4.38.2\n", - "Successfully installed accelerate-0.29.2 compel-2.0.2 diffusers-0.27.2 jedi-0.19.1 mediapy-1.2.0 nvidia-cublas-cu12-12.1.3.1 nvidia-cuda-cupti-cu12-12.1.105 nvidia-cuda-nvrtc-cu12-12.1.105 nvidia-cuda-runtime-cu12-12.1.105 nvidia-cudnn-cu12-8.9.2.26 nvidia-cufft-cu12-11.0.2.54 nvidia-curand-cu12-10.3.2.106 nvidia-cusolver-cu12-11.4.5.107 nvidia-cusparse-cu12-12.1.0.106 nvidia-nccl-cu12-2.19.3 nvidia-nvjitlink-cu12-12.4.127 nvidia-nvtx-cu12-12.1.105 transformers-4.39.3\n" - ] - } - ], + "execution_count": null, + "metadata": { + "id": "ufD_d64nr08H" + }, + "outputs": [], "source": [ - "%pip install --upgrade diffusers transformers mediapy compel accelerate" + "%pip install --quiet --upgrade diffusers transformers mediapy compel accelerate" ] }, { "cell_type": "code", - "execution_count": 2, - "metadata": {}, + "execution_count": null, + "metadata": { + "id": "t123ZHCrseRr" + }, "outputs": [], "source": [ "# restart to use newly installed packages\n", "restart = False # @param {type:\"boolean\"}\n", - "# @markdown > This is usually necessary for releasing RAM and VRAM.\n", + "# @markdown > This is usually necessary on colab.\n", "if restart:\n", " import os\n", " os._exit(0) # Restart the notebook" @@ -139,60 +41,12 @@ }, { "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "--2024-04-15 12:52:46-- https://huggingface.co/mirroring/civitai_mirror/resolve/main/models/Stable-diffusion/AnythingV5V3_v5PrtRE.safetensors\n", - "Resolving huggingface.co (huggingface.co)... 18.239.50.49, 18.239.50.16, 18.239.50.103, ...\n", - "Connecting to huggingface.co (huggingface.co)|18.239.50.49|:443... connected.\n", - "HTTP request sent, awaiting response... 
302 Found\n", - "Location: https://cdn-lfs.huggingface.co/repos/7f/f4/7ff4da9ea95382d37e64e2f21cf395e6e1893c4761b430a659d90d6c31da469c/7f96a1a9ca9b3a3242a9ae95d19284f0d2da8d5282b42d2d974398bf7663a252?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27AnythingV5V3_v5PrtRE.safetensors%3B+filename%3D%22AnythingV5V3_v5PrtRE.safetensors%22%3B&Expires=1713444766&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMzQ0NDc2Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy83Zi9mNC83ZmY0ZGE5ZWE5NTM4MmQzN2U2NGUyZjIxY2YzOTVlNmUxODkzYzQ3NjFiNDMwYTY1OWQ5MGQ2YzMxZGE0NjljLzdmOTZhMWE5Y2E5YjNhMzI0MmE5YWU5NWQxOTI4NGYwZDJkYThkNTI4MmI0MmQyZDk3NDM5OGJmNzY2M2EyNTI%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qIn1dfQ__&Signature=ElSxCIZbHg64qPCCli3qlg0FU8ynY-8REHqeOHjIDuVpnP1ahkR0dwZs095zI4n1h0TVDyG-pi5PLNDASHNedEba5OA5a3-n-BwTXj5GQaThFppn9q3wawGtHCd4qyevx0CSm5pToHEqcg7yUC9agbstB11NXE65otj4sfKAIgHVz04ZYYFZIYRz6FFJr-sRvXZdtblIPKyGRRrBcYSbWAjpJo2utoJZAzq%7EHSUPDWtqO42Fd1B7B2brCeV-70MFOEhb7Aj2RCWocEvGLZUsHuBwmwPxMyI6eHdVYKIa38lBLvbfS8XZeoTzZ7iiR2H4qricbu0VT5%7EYVSavFLuZMQ__&Key-Pair-Id=KVTP0A1DKRTAX [following]\n", - "--2024-04-15 12:52:46-- https://cdn-lfs.huggingface.co/repos/7f/f4/7ff4da9ea95382d37e64e2f21cf395e6e1893c4761b430a659d90d6c31da469c/7f96a1a9ca9b3a3242a9ae95d19284f0d2da8d5282b42d2d974398bf7663a252?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27AnythingV5V3_v5PrtRE.safetensors%3B+filename%3D%22AnythingV5V3_v5PrtRE.safetensors%22%3B&Expires=1713444766&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMzQ0NDc2Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy83Zi9mNC83ZmY0ZGE5ZWE5NTM4MmQzN2U2NGUyZjIxY2YzOTVlNmUxODkzYzQ3NjFiNDMwYTY1OWQ5MGQ2YzMxZGE0NjljLzdmOTZhMWE5Y2E5YjNhMzI0MmE5YWU5NWQxOTI4NGYwZDJkYThkNTI4MmI0MmQyZDk3NDM5OGJmNzY2M2EyNTI%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qIn1dfQ__&Signature=ElSxCIZbHg64qPCCli3qlg0FU8ynY-8REHqeOHjIDuVpnP1ahkR0dwZs095zI4n1h0TVDyG-pi5PLNDASHNedEba5OA5a3-n-BwTXj5GQaThFppn9q3wawGtHCd4qyevx0CSm5pToHEqcg7yUC9agbstB11NXE65otj4sfKAIgHVz04ZYYFZIYRz6FFJr-sRvXZdtblIPKyGRRrBcYSbWAjpJo2utoJZAzq%7EHSUPDWtqO42Fd1B7B2brCeV-70MFOEhb7Aj2RCWocEvGLZUsHuBwmwPxMyI6eHdVYKIa38lBLvbfS8XZeoTzZ7iiR2H4qricbu0VT5%7EYVSavFLuZMQ__&Key-Pair-Id=KVTP0A1DKRTAX\n", - "Resolving cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)... 108.156.60.112, 108.156.60.37, 108.156.60.109, ...\n", - "Connecting to cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)|108.156.60.112|:443... connected.\n", - "HTTP request sent, awaiting response... 200 OK\n", - "Length: 2132626102 (2.0G) [binary/octet-stream]\n", - "Saving to: ‘AnythingV5V3_v5PrtRE.safetensors’\n", - "\n", - "AnythingV5V3_v5PrtR 100%[===================>] 1.99G 255MB/s in 8.6s \n", - "\n", - "2024-04-15 12:52:55 (237 MB/s) - ‘AnythingV5V3_v5PrtRE.safetensors’ saved [2132626102/2132626102]\n", - "\n", - "--2024-04-15 12:53:08-- https://raw.githubusercontent.com/huggingface/diffusers/v0.27.2/scripts/convert_original_stable_diffusion_to_diffusers.py\n", - "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.111.133, 185.199.109.133, 185.199.110.133, ...\n", - "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.111.133|:443... connected.\n", - "HTTP request sent, awaiting response... 
200 OK\n", - "Length: 7022 (6.9K) [text/plain]\n", - "Saving to: ‘convert_original_stable_diffusion_to_diffusers.py’\n", - "\n", - "convert_original_st 100%[===================>] 6.86K --.-KB/s in 0s \n", - "\n", - "2024-04-15 12:53:08 (63.2 MB/s) - ‘convert_original_stable_diffusion_to_diffusers.py’ saved [7022/7022]\n", - "\n", - "The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a one-time only operation. You can interrupt this and resume the migration later on by calling `transformers.utils.move_cache()`.\n", - "0it [00:00, ?it/s]\n", - "2024-04-15 12:53:18.098791: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", - "2024-04-15 12:53:18.098849: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", - "2024-04-15 12:53:18.232487: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "2024-04-15 12:53:19.971293: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", - "config.json: 100% 4.52k/4.52k [00:00<00:00, 15.1MB/s]\n", - "tokenizer_config.json: 100% 905/905 [00:00<00:00, 4.17MB/s]\n", - "vocab.json: 100% 961k/961k [00:00<00:00, 1.54MB/s]\n", - "merges.txt: 100% 525k/525k [00:00<00:00, 13.2MB/s]\n", - "special_tokens_map.json: 100% 389/389 [00:00<00:00, 2.21MB/s]\n", - "tokenizer.json: 100% 2.22M/2.22M [00:00<00:00, 5.30MB/s]\n", - "config.json: 100% 4.55k/4.55k [00:00<00:00, 16.7MB/s]\n", - "pytorch_model.bin: 100% 1.22G/1.22G [00:34<00:00, 35.7MB/s]\n", - "preprocessor_config.json: 100% 342/342 [00:00<00:00, 1.85MB/s]\n", - "/usr/local/lib/python3.10/dist-packages/transformers/models/clip/feature_extraction_clip.py:28: FutureWarning: The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use CLIPImageProcessor instead.\n", - " warnings.warn(\n" - ] - } - ], + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "G_LHyoMTMFAY" + }, + "outputs": [], "source": [ "# @markdown ### **Fetch model file**\n", "import os\n", @@ -234,33 +88,12 @@ }, { "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "09609a78912c46feb605002fd4670891", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Loading pipeline components...: 0%| | 0/6 [00:00 by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\n" - ] - } - ], + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "bG2hkmSEvByV" + }, + "outputs": [], "source": [ "import mediapy as media\n", "import random\n", @@ -282,7 +115,7 @@ "# @markdown > force using CPU device for interference\n", "force_fp32_processing = False # @param {type:\"boolean\"}\n", "# @markdown > force float32 processing format. Usually needed when image results are black.\n", - "use_accelerate = False # @param {type:\"boolean\"}\n", + "use_accelerate = False\n", "# @markdown > Use accelerate to fully utilize all GPUs.\n", "\n", "torch.set_num_threads(os.cpu_count())\n", @@ -317,8 +150,9 @@ "\n", "if not use_accelerate:\n", " # Use late pipeline initializations for accelerate\n", + " kwargs = dict()\n", " if clip_skip > 2:\n", - " pipeline_kwargs['text_encoder'] = transformers.CLIPTextModel.from_pretrained(\n", + " kwargs['text_encoder'] = transformers.CLIPTextModel.from_pretrained(\n", " \"runwayml/stable-diffusion-v1-5\",\n", " subfolder = \"text_encoder\",\n", " num_hidden_layers = 12 - (clip_skip - 1),\n", @@ -337,8 +171,11 @@ }, { "cell_type": "code", - "execution_count": 5, - "metadata": {}, + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "M4Ix-a_hZWFT" + }, "outputs": [], "source": [ "# @markdown # Prepare utils functions\n", @@ -386,7 +223,7 @@ "def accelerated_generate(h, w, cfg_scale, num_inference_steps, batch_size, seeds, kwargs, result_list, use_prompt_embeddings):\n", " accelerator = Accelerator()\n", " device = accelerator.device\n", - "\n", + " \n", " pipe = DiffusionPipeline.from_pretrained(\n", " model_dir,\n", " safety_checker = None,\n", @@ -394,7 +231,7 @@ " **pipeline_kwargs,\n", " ).to(device)\n", " pipe.to(device)\n", - "\n", + " \n", " if use_prompt_embeddings:\n", " prompt_embeds, pooled_prompt_embeds, negative_prompt_embeds, negative_pooled_prompt_embeds = get_prompt_embeddings(\n", " pipe,\n", @@ -412,7 +249,7 @@ " else:\n", " kwargs['prompt'] = prompt\n", " kwargs['negative_prompt'] = negative_prompt\n", - "\n", + " \n", " with accelerator.split_between_processes(seeds) as task_seeds:\n", " images = pipe(\n", " height = h,\n", @@ -428,36 +265,12 @@ }, { "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "ec7d00a41132405b8d11f590ad203bfb", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/30 [00:00" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "AUc4QJfE-uR9" + }, + "outputs": [], "source": [ "# @markdown ### **Prompts**\n", "# @markdown > Default parameters are from image https://civitai.com/images/963361\n", @@ -515,12 +328,12 @@ " if use_accelerate:\n", " from accelerate import notebook_launcher\n", " from multiprocessing import Manager\n", - "\n", + " \n", " with Manager() as manager:\n", " result_list = manager.list()\n", " notebook_launcher(\n", - " accelerated_generate,\n", - " (h, w, cfg_scale, num_inference_steps, batch_size, batched_seeds, kwargs, result_list, use_prompt_embeddings,),\n", + " accelerated_generate, \n", + " (h, w, cfg_scale, num_inference_steps, batch_size, batched_seeds, kwargs, result_list, use_prompt_embeddings,), \n", " num_processes=torch.cuda.device_count()\n", " )\n", " results.extend(result_list)\n", @@ -528,7 
+341,7 @@ " # Todo: use fp16 with accelerate\n", " else:\n", " images = pipe(\n", - " height = h,\n", + " height = h, \n", " width = w,\n", " guidance_scale = cfg_scale,\n", " num_inference_steps = num_inference_steps,\n", @@ -543,20 +356,12 @@ }, { "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "mkdir: created directory 'outputs'\n", - " adding: outputs/\t(in=0) (out=0) (stored 0%)\n", - " adding: outputs/1678803042.png \t(in=1067213) (out=1066999) (deflated 0%)\n", - "total bytes=1067213, compressed=1066999 -> 0% savings\n" - ] - } - ], + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "siDk-W0AKOeg" + }, + "outputs": [], "source": [ "# @markdown Save Images with seeds\n", "!mkdir -pv outputs\n", @@ -570,10 +375,12 @@ } ], "metadata": { - "language_info": { - "name": "" + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 4 }
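
Notes on the patched cells (illustrative sketches, not part of the diff). The clip-skip handling touched by this patch is easy to miss in diff form: rather than a native `clip_skip` argument, the notebook reloads a CLIP text encoder with its last hidden layers dropped and hands it to the pipeline through a `kwargs` dict. A minimal sketch of that pattern, assuming an SD 1.x model (12 text-encoder layers) already converted to a diffusers-format directory; `model_dir` and the `clip_skip` value below are placeholders, not values taken from this repository:

```python
# Clip-skip by truncating the CLIP text encoder (mirrors the patched setup cell).
import torch
import transformers
from diffusers import DiffusionPipeline

model_dir = "model"   # placeholder: directory produced by the conversion script
clip_skip = 3         # as in the notebook, values <= 2 keep the default encoder

kwargs = dict()
if clip_skip > 2:
    # Drop the last (clip_skip - 1) hidden layers of the SD 1.x text encoder.
    kwargs["text_encoder"] = transformers.CLIPTextModel.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        subfolder="text_encoder",
        num_hidden_layers=12 - (clip_skip - 1),
        torch_dtype=torch.float16,
    )

pipe = DiffusionPipeline.from_pretrained(
    model_dir,
    safety_checker=None,
    torch_dtype=torch.float16,
    **kwargs,
).to("cuda")
```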
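
The patch also keeps `compel` in the install line and routes prompts through a `get_prompt_embeddings` helper whose body lies outside these hunks. For orientation only, weighted-prompt embeddings with compel on an SD 1.x pipeline typically look like the sketch below; the prompts and variable names are assumptions, only the `Compel` calls are the library's documented API, and the notebook's actual helper may differ (it appears to also return pooled embeddings):

```python
# Orientation sketch: weighted prompt embeddings with compel for an SD 1.x pipeline.
# `pipe` is assumed to be the pipeline built in the previous sketch; prompts are placeholders.
from compel import Compel

compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)

prompt_embeds = compel_proc("masterpiece, best quality, 1girl, solo")
negative_prompt_embeds = compel_proc("lowres, bad anatomy, worst quality")

# Pad both embeddings to the same token length before passing them to the pipeline.
[prompt_embeds, negative_prompt_embeds] = compel_proc.pad_conditioning_tensors_to_same_length(
    [prompt_embeds, negative_prompt_embeds]
)

image = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    num_inference_steps=30,
    guidance_scale=7.0,
).images[0]
```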
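
Finally, the multi-GPU path this patch reformats (`accelerated_generate`) boils down to accelerate's `notebook_launcher` plus `split_between_processes`: each launched process loads its own copy of the pipeline on its own GPU and renders only the seeds assigned to it. A condensed sketch of that pattern follows, with placeholder model path, prompt, and seed list; the notebook's version additionally threads prompt embeddings and a shared result list through the worker:

```python
# Condensed sketch of the notebook's multi-GPU pattern: one pipeline per process,
# with the seed list sharded across processes by accelerate.
import os
import torch
from accelerate import Accelerator, notebook_launcher
from diffusers import DiffusionPipeline

def generate(model_dir, prompt, seeds):
    accelerator = Accelerator()
    device = accelerator.device

    pipe = DiffusionPipeline.from_pretrained(
        model_dir,
        safety_checker=None,
    ).to(device)

    os.makedirs("outputs", exist_ok=True)

    # Each process receives a disjoint slice of the seed list.
    with accelerator.split_between_processes(seeds) as my_seeds:
        for seed in my_seeds:
            image = pipe(
                prompt=prompt,
                generator=torch.Generator(device).manual_seed(seed),
                num_inference_steps=30,
            ).images[0]
            image.save(f"outputs/{seed}.png")

if __name__ == "__main__":
    # In the notebook this is driven by notebook_launcher, one process per visible GPU.
    notebook_launcher(
        generate,
        ("model", "a watercolor landscape", [1, 2, 3, 4]),
        num_processes=torch.cuda.device_count(),
    )
```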