diff --git a/README.md b/README.md
index b2d9597..48606be 100644
--- a/README.md
+++ b/README.md
@@ -33,13 +33,11 @@ With `xTuring` you can,
-## 🌟 INT4 fine-tuning and generation with LLaMA LoRA
+## 🌟 What's new?
+We are excited to announce the latest enhancements to our `xTuring` library: Falcon LLM integration and Generic model support. With this update, you can use and finetune the Falcon-7B model off-the-shelf, off-the-shelf with INT8 precision, with LoRA architecture, and with LoRA architecture with INT8 precision. Moreover, in case you do not find a model you want to run in the models' list, you can still use `xTuring` to run it with the new `GenericModel` wrapper available to you. This new integration allows you to test and finetune any new model on xTuring without waiting for it to be integrated.
-We are excited to announce the latest enhancement to our `xTuring` library: INT4 fine-tuning and generation integration. With this update, you can fine-tune LLMs like LLaMA with LoRA architecture in INT4 precision with less than `6 GB` of VRAM. This breakthrough significantly reduces memory requirements and accelerates the fine-tuning process, allowing you to achieve state-of-the-art performance with less computational resources.
-
-More information about INT4 fine-tuning and benchmarks can be found in the [INT4 README](examples/int4_finetuning/README.md).
-
-You can check out the [LLaMA INT4 fine-tuning example](examples/int4_finetuning/LLaMA_lora_int4.ipynb) to see how it works.
+You can check the [Falcon LoRA INT8 working example](examples/falcon/falcon_lora_int8.py) file to see how it works.
+Also, you can check the [GenericModel working example](examples/generic/generic_model.py) file to see how it works.
@@ -147,6 +145,8 @@ model = BaseModel.load("x/distilgpt2_lora_finetuned_alpaca")
- [x] Added fine-tuned checkpoints for some models to the hub
- [x] INT4 LLaMA LoRA fine-tuning demo
- [x] INT4 LLaMA LoRA fine-tuning with INT4 generation
+- [x] Support for a generic model wrapper
+- [x] Support for Falcon-7B model
- [ ] Evaluation of LLM models
- [ ] Support for Stable Diffusion
diff --git a/docs/docs/contributing/general_rules.md b/docs/docs/contributing/general_rules.md
index 06e7c81..eb91abf 100644
--- a/docs/docs/contributing/general_rules.md
+++ b/docs/docs/contributing/general_rules.md
@@ -26,9 +26,10 @@ To contribute to xTuring, follow these steps:
git clone https://github.com//xturing.git
```
-3. Create a new branch for your changes
+3. Create a new branch for your changes, branching off the `dev` branch.
```bash
+git checkout dev
git checkout -b
```
diff --git a/pyproject.toml b/pyproject.toml
index acf19d1..c5805bb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "xturing"
-version = "0.1.3"
+version = "0.1.4"
description = "Fine-tuning, evaluation and data generation for LLMs"
authors = [
diff --git a/src/xturing/__about__.py b/src/xturing/__about__.py
index ae73625..bbab024 100644
--- a/src/xturing/__about__.py
+++ b/src/xturing/__about__.py
@@ -1 +1 @@
-__version__ = "0.1.3"
+__version__ = "0.1.4"