@inproceedings{1b2a595922134578a7890cb2817dcd41,
  title     = {{ReFACT}: Updating Text-to-Image Models by Editing the Text Encoder},
  abstract  = {Our world is marked by unprecedented technological, global, and socio-political transformations, posing a significant challenge to text-to-image generative models. These models encode factual associations within their parameters that can quickly become outdated, diminishing their utility for end-users. To that end, we introduce ReFACT, a novel approach for editing factual associations in text-to-image models without relying on explicit input from end-users or costly re-training. ReFACT updates the weights of a specific layer in the text encoder, modifying only a tiny portion of the model{\textquoteright}s parameters and leaving the rest of the model unaffected. We empirically evaluate ReFACT on an existing benchmark, alongside a newly curated dataset. Compared to other methods, ReFACT achieves superior performance in both generalization to related concepts and preservation of unrelated concepts. Furthermore, ReFACT maintains image generation quality, making it a practical tool for updating and correcting factual information in text-to-image models.},
  author    = {Arad, Dana and Orgad, Hadas and Belinkov, Yonatan},
  editor    = {Duh, Kevin and Gomez, Helena and Bethard, Steven},
  booktitle = {Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)},
  publisher = {Association for Computational Linguistics},
  year      = {2024},
  pages     = {2537--2558},
  doi       = {10.18653/v1/2024.naacl-long.140},
  language  = {English},
  note      = {Publisher Copyright: {\textcopyright} 2024 Association for Computational Linguistics.; Conference date: 16-06-2024 Through 21-06-2024},
}