From 1d3f6c1a5e8bb3d4d7a58552066cafbaac3f0397 Mon Sep 17 00:00:00 2001 From: jiqing-feng Date: Wed, 27 Dec 2023 10:03:56 -0500 Subject: [PATCH 1/3] add comments on mixed_precision --- examples/textual_inversion/textual_inversion.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py index 50bcc992064d..b3acb2ba7e98 100644 --- a/examples/textual_inversion/textual_inversion.py +++ b/examples/textual_inversion/textual_inversion.py @@ -341,7 +341,15 @@ def parse_args(): help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." - "and an Nvidia Ampere GPU." + "and a 4th Gen Intel Xeon (and later) or an Nvidia Ampere GPU." ), ) parser.add_argument( "--use_ipex", action="store_true", help=( "Whether or not to use ipex to accelerate the training process," "requires Intel Gen 3rd Xeon (and later) or Intel XPU (PVC)" ), ) parser.add_argument( @@ -779,6 +787,11 @@ def main(): unet.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) + if args.use_ipex: + import intel_extension_for_pytorch as ipex + unet = ipex.optimize(unet, dtype=weight_dtype) + vae = ipex.optimize(vae, dtype=weight_dtype) + # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: From 9164d9a6526eeaefbf9c7b4ac78a7fb9ad31bf93 Mon Sep 17 00:00:00 2001 From: jiqing-feng Date: Wed, 27 Dec 2023 10:28:12 -0500 Subject: [PATCH 2/3] add ipex --- examples/textual_inversion/README.md | 7 +++++++ examples/textual_inversion/textual_inversion.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/examples/textual_inversion/README.md b/examples/textual_inversion/README.md index 0a2723f0982f..3d60ecc7459c 100644 --- a/examples/textual_inversion/README.md +++ b/examples/textual_inversion/README.md @@ -94,6 +94,13 @@ to a number larger than one, *e.g.*: --num_vectors 5 ``` +**CPU**: If you run on a 4th Gen Intel Xeon (and later), using ipex and bf16 will get you a significant acceleration. +You need to add `--mixed_precision="bf16"` and `--use_ipex` to the command and install the following package: ``` +pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cpu +pip install intel-extension-for-pytorch==2.0.0 +``` + The saved textual inversion vectors will then be larger in size compared to the default case. 
### Inference diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py index b3acb2ba7e98..af1bf3dcdf6d 100644 --- a/examples/textual_inversion/textual_inversion.py +++ b/examples/textual_inversion/textual_inversion.py @@ -349,7 +349,7 @@ def parse_args(): action="store_true", help=( "Whether or not to use ipex to accelerate the training process," - "requires Intel Gen 3rd Xeon (and later) or Intel XPU (PVC)" + "requires Intel Gen 3rd Xeon (and later)" ), ) parser.add_argument( From 82cc490a7d8d97cc32bc897f082b709be9ad6951 Mon Sep 17 00:00:00 2001 From: jiqing-feng Date: Wed, 27 Dec 2023 10:37:40 -0500 Subject: [PATCH 3/3] fix readme --- examples/textual_inversion/README.md | 3 +-- examples/textual_inversion/textual_inversion.py | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/textual_inversion/README.md b/examples/textual_inversion/README.md index 3d60ecc7459c..982141049b1c 100644 --- a/examples/textual_inversion/README.md +++ b/examples/textual_inversion/README.md @@ -97,8 +97,7 @@ to a number larger than one, *e.g.*: **CPU**: If you run on a 4th Gen Intel Xeon (and later), using ipex and bf16 will get you a significant acceleration. You need to add `--mixed_precision="bf16"` and `--use_ipex` to the command and install the following package: ``` -pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cpu -pip install intel-extension-for-pytorch==2.0.0 +pip install intel-extension-for-pytorch ``` The saved textual inversion vectors will then be larger in size compared to the default case. 
diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py index af1bf3dcdf6d..1bb49f29af96 100644 --- a/examples/textual_inversion/textual_inversion.py +++ b/examples/textual_inversion/textual_inversion.py @@ -348,8 +348,7 @@ def parse_args(): "--use_ipex", action="store_true", help=( - "Whether or not to use ipex to accelerate the training process," - "requires Intel Gen 3rd Xeon (and later)" + "Whether or not to use ipex to accelerate the training process," "requires Intel Gen 3rd Xeon (and later)" ), ) parser.add_argument( @@ -789,6 +788,7 @@ def main(): if args.use_ipex: import intel_extension_for_pytorch as ipex + unet = ipex.optimize(unet, dtype=weight_dtype) vae = ipex.optimize(vae, dtype=weight_dtype)