@inproceedings{724bb73bb3ca4bcaa0fc784d29e6ed98,
title = "Multicalibration: Calibration for the (computationally-identifiable) masses",
abstract = "We develop and study multicalibration as a new measure of fairness in machine learning that aims to mitigate inadvertent or malicious discrimination that is introduced at training time (even from ground truth data). Multicalibration guarantees meaningful (calibrated) predictions for every subpopulation that can be identified within a specified class of computations. The specified class can be quite rich; in particular, it can contain many overlapping subgroups of a protected group. We demonstrate that in many settings this strong notion of protection from discrimination is provably attainable and aligned with the goal of accurate predictions. Along the way, we present algorithms for learning a multicalibrated predictor, study the computational complexity of this task, and illustrate tight connections to the agnostic learning model.",
author = "H{\'e}bert-Johnson, {\'U}rsula and Kim, Michael P. and Reingold, Omer and Rothblum, Guy N.",
note = "Publisher Copyright: {\textcopyright} 2018 by authors. All rights reserved.; 35th International Conference on Machine Learning, ICML 2018 ; Conference date: 10-07-2018 Through 15-07-2018",
year = "2018",
language = "English",
series = "35th International Conference on Machine Learning, ICML 2018",
pages = "3087--3103",
editor = "Jennifer Dy and Andreas Krause",
booktitle = "35th International Conference on Machine Learning, ICML 2018",
}