BibTeX
@inproceedings{mutlu-etal-2019-team,
title = "Team {H}oward {B}eale at {S}em{E}val-2019 Task 4: Hyperpartisan News Detection with {BERT}",
author = "Mutlu, Osman and
Can, Ozan Arkan and
Dayanik, Erenay",
editor = "May, Jonathan and
Shutova, Ekaterina and
Herbelot, Aurelie and
Zhu, Xiaodan and
Apidianaki, Marianna and
Mohammad, Saif M.",
booktitle = "Proceedings of the 13th International Workshop on Semantic Evaluation",
month = jun,
year = "2019",
address = "Minneapolis, Minnesota, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/S19-2175",
doi = "10.18653/v1/S19-2175",
pages = "1007--1011",
abstract = "This paper describes our system for SemEval-2019 Task 4: Hyperpartisan News Detection (Kiesel et al., 2019). We use pretrained BERT (Devlin et al., 2018) architecture and investigate the effect of different fine tuning regimes on the final classification task. We show that additional pretraining on news domain improves the performance on the Hyperpartisan News Detection task. Our system ranked 8th out of 42 teams with 78.3{\%} accuracy on the held-out test dataset.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="mutlu-etal-2019-team">
    <titleInfo>
      <title>Team Howard Beale at SemEval-2019 Task 4: Hyperpartisan News Detection with BERT</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Osman</namePart>
      <namePart type="family">Mutlu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ozan</namePart>
      <namePart type="given">Arkan</namePart>
      <namePart type="family">Can</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Erenay</namePart>
      <namePart type="family">Dayanik</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 13th International Workshop on Semantic Evaluation</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Jonathan</namePart>
        <namePart type="family">May</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ekaterina</namePart>
        <namePart type="family">Shutova</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Aurelie</namePart>
        <namePart type="family">Herbelot</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Xiaodan</namePart>
        <namePart type="family">Zhu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Marianna</namePart>
        <namePart type="family">Apidianaki</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Saif</namePart>
        <namePart type="given">M</namePart>
        <namePart type="family">Mohammad</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Minneapolis, Minnesota, USA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This paper describes our system for SemEval-2019 Task 4: Hyperpartisan News Detection (Kiesel et al., 2019). We use pretrained BERT (Devlin et al., 2018) architecture and investigate the effect of different fine tuning regimes on the final classification task. We show that additional pretraining on news domain improves the performance on the Hyperpartisan News Detection task. Our system ranked 8th out of 42 teams with 78.3% accuracy on the held-out test dataset.</abstract>
    <identifier type="citekey">mutlu-etal-2019-team</identifier>
    <identifier type="doi">10.18653/v1/S19-2175</identifier>
    <location>
      <url>https://aclanthology.org/S19-2175</url>
    </location>
    <part>
      <date>2019-06</date>
      <extent unit="page">
        <start>1007</start>
        <end>1011</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Team Howard Beale at SemEval-2019 Task 4: Hyperpartisan News Detection with BERT
%A Mutlu, Osman
%A Can, Ozan Arkan
%A Dayanik, Erenay
%Y May, Jonathan
%Y Shutova, Ekaterina
%Y Herbelot, Aurelie
%Y Zhu, Xiaodan
%Y Apidianaki, Marianna
%Y Mohammad, Saif M.
%S Proceedings of the 13th International Workshop on Semantic Evaluation
%D 2019
%8 June
%I Association for Computational Linguistics
%C Minneapolis, Minnesota, USA
%F mutlu-etal-2019-team
%X This paper describes our system for SemEval-2019 Task 4: Hyperpartisan News Detection (Kiesel et al., 2019). We use pretrained BERT (Devlin et al., 2018) architecture and investigate the effect of different fine tuning regimes on the final classification task. We show that additional pretraining on news domain improves the performance on the Hyperpartisan News Detection task. Our system ranked 8th out of 42 teams with 78.3% accuracy on the held-out test dataset.
%R 10.18653/v1/S19-2175
%U https://aclanthology.org/S19-2175
%U https://doi.org/10.18653/v1/S19-2175
%P 1007-1011
Markdown (Informal)
[Team Howard Beale at SemEval-2019 Task 4: Hyperpartisan News Detection with BERT](https://aclanthology.org/S19-2175) (Mutlu et al., SemEval 2019)
ACL
Osman Mutlu, Ozan Arkan Can, and Erenay Dayanik. 2019. Team Howard Beale at SemEval-2019 Task 4: Hyperpartisan News Detection with BERT. In Proceedings of the 13th International Workshop on Semantic Evaluation, pages 1007–1011, Minneapolis, Minnesota, USA. Association for Computational Linguistics.
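
For readers who want a concrete picture of the approach the abstract summarizes (fine-tuning a pretrained BERT model as a binary hyperpartisan-vs-not classifier), a minimal sketch using the Hugging Face transformers API follows. The checkpoint name, example texts, and hyperparameters are illustrative assumptions, not the authors' actual configuration or data.

# A minimal sketch (not the authors' code) of fine-tuning pretrained BERT
# for binary hyperpartisan news classification, as described in the abstract.
# Checkpoint, example texts, and hyperparameters are assumptions.
import torch
from torch.optim import AdamW
from transformers import BertTokenizer, BertForSequenceClassification

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)

# Toy stand-ins for article text; label 1 = hyperpartisan, 0 = not hyperpartisan.
texts = [
    "The corrupt elites are destroying this country and the media hides it!",
    "The city council approved the transit budget after a two-hour session.",
]
labels = torch.tensor([1, 0])

batch = tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors="pt")

optimizer = AdamW(model.parameters(), lr=2e-5)
model.train()
optimizer.zero_grad()
outputs = model(**batch, labels=labels)  # cross-entropy loss over the pooled [CLS] representation
outputs.loss.backward()
optimizer.step()

# Inference: argmax over the two logits gives the predicted class.
model.eval()
with torch.no_grad():
    preds = model(**batch).logits.argmax(dim=-1)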