From 1ac899800b600ec3e805f32f82a0ced4843bf24a Mon Sep 17 00:00:00 2001 From: tpoisonooo Date: Mon, 6 May 2024 13:05:28 +0800 Subject: [PATCH] docs(config.qmd): add loraplus example (#1577) * Update qwen2-moe-lora.yaml * feat(project): update --- docs/config.qmd | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/config.qmd b/docs/config.qmd index dadc5c487c..570a173f9a 100644 --- a/docs/config.qmd +++ b/docs/config.qmd @@ -227,6 +227,12 @@ lora_modules_to_save: lora_fan_in_fan_out: false +# LoRA+ hyperparameters +# For more details about the following options, see: +# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/trainer_builder.py` +loraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4. +loraplus_lr_embedding: # loraplus learning rate for lora embedding layers. Default value is 1e-6. + peft: # Configuration options for loftq initialization for LoRA # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization