@inproceedings{b7e31fdaf6774f79bbd19db0997228d2,
  title     = {Finding Visual Task Vectors},
  abstract  = {Visual Prompting is a technique for teaching models to perform a visual task via in-context examples, without any additional training. In this work, we analyze the activations of MAE-VQGAN, a recent Visual Prompting model [4], and find Task Vectors, activations that encode task-specific information. Equipped with this insight, we demonstrate that it is possible to identify the Task Vectors and use them to guide the network towards performing different tasks without having to provide any in-context input-output examples. To find Task Vectors, we compute the mean activations of the attention heads in the model per task and use the REINFORCE [43] algorithm to patch into a subset of them with a new query image. The resulting Task Vectors guide the model towards performing the task better than the original model. (For code and models see www.github.com/alhojel/visual_task_vectors).},
  author    = {Hojel, Alberto and Bai, Yutong and Darrell, Trevor and Globerson, Amir and Bar, Amir},
  editor    = {Leonardis, Ale{\v s} and Ricci, Elisa and Roth, Stefan and Russakovsky, Olga and Sattler, Torsten and Varol, G{\"u}l},
  booktitle = {Computer Vision -- {ECCV} 2024 - 18th European Conference, Proceedings},
  series    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  address   = {Germany},
  pages     = {257--273},
  year      = {2025},
  doi       = {10.1007/978-3-031-72775-7_15},
  isbn      = {9783031727740},
  language  = {English},
  note      = {Publisher Copyright: {\textcopyright} The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.; 18th European Conference on Computer Vision, ECCV 2024 ; Conference date: 29-09-2024 Through 04-10-2024},
}