BibTeX
@inproceedings{r-menon-srivastava-2024-discern,
title = "{DISCERN}: Decoding Systematic Errors in Natural Language for Text Classifiers",
author = "R Menon, Rakesh and
Srivastava, Shashank",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.emnlp-main.1091/",
doi = "10.18653/v1/2024.emnlp-main.1091",
pages = "19565--19583",
abstract = "Despite their high predictive accuracies, current machine learning systems often exhibit systematic biases stemming from annotation artifacts or insufficient support for certain classes in the dataset. Recent work proposes automatic methods for identifying and explaining systematic biases using keywords. We introduce DISCERN, a framework for interpreting systematic biases in text classifiers using language explanations. DISCERN iteratively generates precise natural language descriptions of systematic errors by employing an interactive loop between two large language models. Finally, we use the descriptions to improve classifiers by augmenting classifier training sets with synthetically generated instances or annotated examples via active learning. On three text-classification datasets, we demonstrate that language explanations from our framework induce consistent performance improvements that go beyond what is achievable with exemplars of systematic bias. Finally, in human evaluations, we show that users can interpret systematic biases more effectively (by over 25{\%} relative) and efficiently when described through language explanations as opposed to cluster exemplars."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="r-menon-srivastava-2024-discern">
    <titleInfo>
      <title>DISCERN: Decoding Systematic Errors in Natural Language for Text Classifiers</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Rakesh</namePart>
      <namePart type="family">R Menon</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Shashank</namePart>
      <namePart type="family">Srivastava</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Yaser</namePart>
        <namePart type="family">Al-Onaizan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mohit</namePart>
        <namePart type="family">Bansal</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yun-Nung</namePart>
        <namePart type="family">Chen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Miami, Florida, USA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Despite their high predictive accuracies, current machine learning systems often exhibit systematic biases stemming from annotation artifacts or insufficient support for certain classes in the dataset. Recent work proposes automatic methods for identifying and explaining systematic biases using keywords. We introduce DISCERN, a framework for interpreting systematic biases in text classifiers using language explanations. DISCERN iteratively generates precise natural language descriptions of systematic errors by employing an interactive loop between two large language models. Finally, we use the descriptions to improve classifiers by augmenting classifier training sets with synthetically generated instances or annotated examples via active learning. On three text-classification datasets, we demonstrate that language explanations from our framework induce consistent performance improvements that go beyond what is achievable with exemplars of systematic bias. Finally, in human evaluations, we show that users can interpret systematic biases more effectively (by over 25% relative) and efficiently when described through language explanations as opposed to cluster exemplars.</abstract>
    <identifier type="citekey">r-menon-srivastava-2024-discern</identifier>
    <identifier type="doi">10.18653/v1/2024.emnlp-main.1091</identifier>
    <location>
      <url>https://aclanthology.org/2024.emnlp-main.1091/</url>
    </location>
    <part>
      <date>2024-11</date>
      <extent unit="page">
        <start>19565</start>
        <end>19583</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T DISCERN: Decoding Systematic Errors in Natural Language for Text Classifiers
%A R Menon, Rakesh
%A Srivastava, Shashank
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F r-menon-srivastava-2024-discern
%X Despite their high predictive accuracies, current machine learning systems often exhibit systematic biases stemming from annotation artifacts or insufficient support for certain classes in the dataset. Recent work proposes automatic methods for identifying and explaining systematic biases using keywords. We introduce DISCERN, a framework for interpreting systematic biases in text classifiers using language explanations. DISCERN iteratively generates precise natural language descriptions of systematic errors by employing an interactive loop between two large language models. Finally, we use the descriptions to improve classifiers by augmenting classifier training sets with synthetically generated instances or annotated examples via active learning. On three text-classification datasets, we demonstrate that language explanations from our framework induce consistent performance improvements that go beyond what is achievable with exemplars of systematic bias. Finally, in human evaluations, we show that users can interpret systematic biases more effectively (by over 25% relative) and efficiently when described through language explanations as opposed to cluster exemplars.
%R 10.18653/v1/2024.emnlp-main.1091
%U https://aclanthology.org/2024.emnlp-main.1091/
%U https://doi.org/10.18653/v1/2024.emnlp-main.1091
%P 19565-19583
Markdown (Informal)
[DISCERN: Decoding Systematic Errors in Natural Language for Text Classifiers](https://aclanthology.org/2024.emnlp-main.1091/) (R Menon & Srivastava, EMNLP 2024)
ACL
Rakesh R Menon and Shashank Srivastava. 2024. DISCERN: Decoding Systematic Errors in Natural Language for Text Classifiers. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 19565–19583, Miami, Florida, USA. Association for Computational Linguistics.
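
For readers who want a concrete picture of the loop the abstract describes, here is a minimal sketch. It is not the authors' implementation: the ErrorCluster structure, the prompts, and the function names (propose_description, critique_description, describe_errors) are hypothetical stand-ins for the interactive describer/evaluator exchange between two large language models that the abstract summarizes.

```python
# Purely illustrative sketch of the describe-and-refine loop from the abstract:
# one LLM proposes a natural-language description of a classifier's systematic
# errors, a second LLM critiques it, and the loop repeats until the critique
# passes. All names and prompts below are hypothetical, not from the paper.
from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple

LLM = Callable[[str], str]  # any text-in/text-out model endpoint


@dataclass
class ErrorCluster:
    """A group of validation examples the classifier misclassifies."""
    texts: List[str]
    gold_label: str


def propose_description(describer: LLM, cluster: ErrorCluster,
                        feedback: Optional[str] = None) -> str:
    """Ask the 'describer' LLM to hypothesize what unites the errors."""
    prompt = (f"These texts share the gold label '{cluster.gold_label}' "
              "but were all misclassified:\n" + "\n".join(cluster.texts[:10]))
    if feedback:
        prompt += "\nThe previous description was rejected because: " + feedback
    prompt += "\nIn one sentence, describe the systematic error."
    return describer(prompt)


def critique_description(evaluator: LLM, cluster: ErrorCluster,
                         description: str) -> Tuple[bool, str]:
    """Ask the 'evaluator' LLM whether the description fits the cluster."""
    prompt = (f"Candidate error description: {description}\n"
              "Does it precisely cover these misclassified texts? "
              "Answer OK, or explain what it misses:\n"
              + "\n".join(cluster.texts[:10]))
    verdict = evaluator(prompt)
    return verdict.strip().upper().startswith("OK"), verdict


def describe_errors(describer: LLM, evaluator: LLM, cluster: ErrorCluster,
                    max_rounds: int = 5) -> str:
    """Iterate propose -> critique until accepted, or stop after max_rounds."""
    feedback = None
    description = ""
    for _ in range(max_rounds):
        description = propose_description(describer, cluster, feedback)
        accepted, feedback = critique_description(evaluator, cluster, description)
        if accepted:
            break
    return description
```

Per the abstract, a description produced this way would then drive the improvement step: generating synthetic training instances that match the described failure mode, or selecting matching examples for annotation via active learning.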