@inproceedings{berezin-etal-2023-offence,
title = "No offence, {Bert} - {I} insult only humans! Multilingual sentence-level attack on toxicity detection networks",
author = "Berezin, Sergey and
Farahbakhsh, Reza and
Crespi, Noel",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-emnlp.155",
doi = "10.18653/v1/2023.findings-emnlp.155",
pages = "2362--2369",
abstract = "We introduce a simple yet efficient sentence-level attack on black-box toxicity detector models. By adding several positive words or sentences to the end of a hateful message, we are able to change the prediction of a neural network and pass the toxicity detection system check. This approach is shown to be working on seven languages from three different language families. We also describe the defence mechanism against the aforementioned attack and discuss its limitations.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="berezin-etal-2023-offence">
<titleInfo>
<title>No offence, Bert - I insult only humans! Multilingual sentence-level attack on toxicity detection networks</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sergey</namePart>
<namePart type="family">Berezin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Reza</namePart>
<namePart type="family">Farahbakhsh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Noel</namePart>
<namePart type="family">Crespi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2023</title>
</titleInfo>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Pino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kalika</namePart>
<namePart type="family">Bali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We introduce a simple yet efficient sentence-level attack on black-box toxicity detector models. By adding several positive words or sentences to the end of a hateful message, we are able to change the prediction of a neural network and pass the toxicity detection system check. This approach is shown to be working on seven languages from three different language families. We also describe the defence mechanism against the aforementioned attack and discuss its limitations.</abstract>
<identifier type="citekey">berezin-etal-2023-offence</identifier>
<identifier type="doi">10.18653/v1/2023.findings-emnlp.155</identifier>
<location>
<url>https://aclanthology.org/2023.findings-emnlp.155</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>2362</start>
<end>2369</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T No offence, Bert - I insult only humans! Multilingual sentence-level attack on toxicity detection networks
%A Berezin, Sergey
%A Farahbakhsh, Reza
%A Crespi, Noel
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Findings of the Association for Computational Linguistics: EMNLP 2023
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F berezin-etal-2023-offence
%X We introduce a simple yet efficient sentence-level attack on black-box toxicity detector models. By adding several positive words or sentences to the end of a hateful message, we are able to change the prediction of a neural network and pass the toxicity detection system check. This approach is shown to be working on seven languages from three different language families. We also describe the defence mechanism against the aforementioned attack and discuss its limitations.
%R 10.18653/v1/2023.findings-emnlp.155
%U https://aclanthology.org/2023.findings-emnlp.155
%U https://doi.org/10.18653/v1/2023.findings-emnlp.155
%P 2362-2369
Markdown (Informal)
[No offence, Bert - I insult only humans! Multilingual sentence-level attack on toxicity detection networks](https://aclanthology.org/2023.findings-emnlp.155) (Berezin et al., Findings 2023)
ACL