@inproceedings{6f3719a97816419891c833aeaabc9df7,
title = "Protecting the Protected Group: Circumventing Harmful Fairness",
abstract = "The recent literature on fair machine learning shows that the choice of fairness constraints must be driven by the utilities of the population. However, virtually all previous work makes the unrealistic assumption that the exact underlying utilities of the population (representing private tastes of individuals) are known to the regulator that imposes the fairness constraint. In this paper, we initiate the discussion of the mismatch: the unavoidable difference between the underlying utilities of the population and the utilities assumed by the regulator. We demonstrate that the mismatch can make the disadvantaged protected group worse off after imposing the fairness constraint, and we provide tools to design fairness constraints that help the disadvantaged group despite the mismatch.",
author = "Omer Ben-Porat and Fedor Sandomirskiy and Moshe Tennenholtz",
note = "Publisher Copyright: Copyright {\textcopyright} 2021, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.; 35th AAAI Conference on Artificial Intelligence, AAAI 2021; Conference date: 02-02-2021 through 09-02-2021",
year = "2021",
doi = "10.1609/aaai.v35i6.16654",
language = "English",
pages = "5176--5184",
booktitle = "35th AAAI Conference on Artificial Intelligence, AAAI 2021",
}