diff --git a/content/authors/admin/_index.md b/content/authors/admin/_index.md index cefca69..301ebac 100644 --- a/content/authors/admin/_index.md +++ b/content/authors/admin/_index.md @@ -32,7 +32,7 @@ education: - course: Research Intern institution: National Institute of Informatics (NII), Tokyo, Japan year: 2016 - - course: CSC Joint Ph.D. student + - course: Joint Ph.D. student institution: University of Washington (UW), Seattle, USA year: 2012 - course: B.Eng. in Software Engineering diff --git a/content/publication/Dr2Net/cite.bib b/content/publication/Dr2Net/cite.bib new file mode 100644 index 0000000..7737434 --- /dev/null +++ b/content/publication/Dr2Net/cite.bib @@ -0,0 +1,7 @@ +@inproceedings{zhao2024dr2net, + title={{Dr2Net}: Dynamic Reversible Dual-Residual Networks for Memory-Efficient Finetuning}, + author={Zhao, Chen and Liu, Shuming and Mangalam, Karttikeya and Qian, Guocheng and Zohra, Fatimah and Alghannam, Abdulmohsen and Malik, Jitendra and Ghanem, Bernard}, + booktitle={arXiv:2401.04105v1}, + year={2024} +} + diff --git a/content/publication/Dr2Net/featured.png b/content/publication/Dr2Net/featured.png new file mode 100644 index 0000000..96b39ca Binary files /dev/null and b/content/publication/Dr2Net/featured.png differ diff --git a/content/publication/Dr2Net/index.md b/content/publication/Dr2Net/index.md new file mode 100644 index 0000000..6834a3b --- /dev/null +++ b/content/publication/Dr2Net/index.md @@ -0,0 +1,44 @@ +--- +title: "Dr2Net: Dynamic Reversible Dual-Residual Networks for Memory-Efficient Finetuning" +publication_types: + - "2" +authors: + - admin + - Shuming Liu + - Karttikeya Mangalam + - Guocheng Qian + - Fatimah Zohra + - Abdulmohsen Alghannam + - Jitendra Malik + - Bernard Ghanem +publication: arXiv:2401.04105 +publication_short: arxiv 2024 +abstract: "Large pretrained models are increasingly crucial in modern computer vision tasks. 
These models are typically used in downstream tasks by end-to-end finetuning, which is highly memory-intensive for tasks with high-resolution data, e.g., video understanding, small object detection, and point cloud analysis. In this paper, we propose Dynamic Reversible Dual-Residual Networks, or Dr2Net, a novel family of network architectures that acts as a surrogate network to finetune a pretrained model with substantially reduced memory consumption. Dr2Net contains two types of residual connections, one maintaining the residual structure in the pretrained models, and the other making the network reversible. Due to its reversibility, intermediate activations, which can be reconstructed from output, are cleared from memory during training. We use two coefficients on either type of residual connections respectively, and introduce a dynamic training strategy that seamlessly transitions the pretrained model to a reversible network with much higher numerical precision. We evaluate Dr2Net on various pretrained models and various tasks, and show that it can reach comparable performance to conventional finetuning but with significantly less memory usage." 
+ 

draft: false
featured: true
tags:
  - Deep learning
  - Computer vision
  - Reversible networks
  - Memory-efficient finetuning
slides: ""
url_pdf: https://arxiv.org/pdf/2401.04105.pdf
image:
  caption: ""
  focal_point: ""
  preview_only: false
  filename: featured.png
url_dataset: ""
url_project: ""
url_source: ""
url_video: ""
author_notes: []
doi: ""
projects: []
date: 2024-01-04T00:00:02.020Z
url_slides: ""
publishDate: 2024
url_poster: ""
url_code: ""
--- diff --git a/content/publication/LAE/index.md b/content/publication/LAE/index.md index dded06e..5de8f6f 100644 --- a/content/publication/LAE/index.md +++ b/content/publication/LAE/index.md @@ -36,9 +36,9 @@ url_video: "" author_notes: [] doi: "" projects: [] -date: 2023-07-16T00:00:02.020Z +date: 2021-01-16T00:00:02.020Z url_slides: "" -publishDate: 2023 +publishDate: 2021 url_poster: "" url_code: ---