@inproceedings{demeester-etal-2018-predefined,
title = "Predefined Sparseness in Recurrent Sequence Models",
author = "Demeester, Thomas and
Deleu, Johannes and
Godin, Fr{\'e}deric and
Develder, Chris",
editor = "Korhonen, Anna and
Titov, Ivan",
booktitle = "Proceedings of the 22nd Conference on Computational Natural Language Learning",
month = oct,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/K18-1032",
doi = "10.18653/v1/K18-1032",
pages = "324--333",
abstract = "Inducing sparseness while training neural networks has been shown to yield models with a lower memory footprint but similar effectiveness to dense models. However, sparseness is typically induced starting from a dense model, and thus this advantage does not hold during training. We propose techniques to enforce sparseness upfront in recurrent sequence models for NLP applications, to also benefit training. First, in language modeling, we show how to increase hidden state sizes in recurrent layers without increasing the number of parameters, leading to more expressive models. Second, for sequence labeling, we show that word embeddings with predefined sparseness lead to similar performance as dense embeddings, at a fraction of the number of trainable parameters.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="demeester-etal-2018-predefined">
    <titleInfo>
      <title>Predefined Sparseness in Recurrent Sequence Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Thomas</namePart>
      <namePart type="family">Demeester</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Johannes</namePart>
      <namePart type="family">Deleu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Fréderic</namePart>
      <namePart type="family">Godin</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Chris</namePart>
      <namePart type="family">Develder</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-10</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 22nd Conference on Computational Natural Language Learning</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Korhonen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ivan</namePart>
        <namePart type="family">Titov</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Brussels, Belgium</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Inducing sparseness while training neural networks has been shown to yield models with a lower memory footprint but similar effectiveness to dense models. However, sparseness is typically induced starting from a dense model, and thus this advantage does not hold during training. We propose techniques to enforce sparseness upfront in recurrent sequence models for NLP applications, to also benefit training. First, in language modeling, we show how to increase hidden state sizes in recurrent layers without increasing the number of parameters, leading to more expressive models. Second, for sequence labeling, we show that word embeddings with predefined sparseness lead to similar performance as dense embeddings, at a fraction of the number of trainable parameters.</abstract>
    <identifier type="citekey">demeester-etal-2018-predefined</identifier>
    <identifier type="doi">10.18653/v1/K18-1032</identifier>
    <location>
      <url>https://aclanthology.org/K18-1032</url>
    </location>
    <part>
      <date>2018-10</date>
      <extent unit="page">
        <start>324</start>
        <end>333</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Predefined Sparseness in Recurrent Sequence Models
%A Demeester, Thomas
%A Deleu, Johannes
%A Godin, Fréderic
%A Develder, Chris
%Y Korhonen, Anna
%Y Titov, Ivan
%S Proceedings of the 22nd Conference on Computational Natural Language Learning
%D 2018
%8 October
%I Association for Computational Linguistics
%C Brussels, Belgium
%F demeester-etal-2018-predefined
%X Inducing sparseness while training neural networks has been shown to yield models with a lower memory footprint but similar effectiveness to dense models. However, sparseness is typically induced starting from a dense model, and thus this advantage does not hold during training. We propose techniques to enforce sparseness upfront in recurrent sequence models for NLP applications, to also benefit training. First, in language modeling, we show how to increase hidden state sizes in recurrent layers without increasing the number of parameters, leading to more expressive models. Second, for sequence labeling, we show that word embeddings with predefined sparseness lead to similar performance as dense embeddings, at a fraction of the number of trainable parameters.
%R 10.18653/v1/K18-1032
%U https://aclanthology.org/K18-1032
%U https://doi.org/10.18653/v1/K18-1032
%P 324-333
Markdown (Informal)

[Predefined Sparseness in Recurrent Sequence Models](https://aclanthology.org/K18-1032) (Demeester et al., CoNLL 2018)

ACL

Thomas Demeester, Johannes Deleu, Fréderic Godin, and Chris Develder. 2018. Predefined Sparseness in Recurrent Sequence Models. In Proceedings of the 22nd Conference on Computational Natural Language Learning, pages 324–333, Brussels, Belgium. Association for Computational Linguistics.
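For orientation, the abstract's central idea is that the sparsity pattern is fixed *before* training, so the parameter and memory savings already apply during training rather than only after pruning a dense model. The sketch below is a minimal, illustrative PyTorch example of an embedding layer with such a predefined sparsity mask; the class name `PredefinedSparseEmbedding` and the per-word dimension schedule are assumptions made for this example, not the exact construction used in the paper.

```python
# Illustrative sketch only: an embedding layer whose weight matrix has a fixed
# (predefined) sparsity pattern, so the number of trainable parameters is
# decided before training starts. The schedule below (rarer words get fewer
# active dimensions) is an assumption for the demo, not necessarily the
# construction from Demeester et al. (2018).

import torch
import torch.nn as nn


class PredefinedSparseEmbedding(nn.Module):
    def __init__(self, vocab_size: int, embed_dim: int, active_dims: torch.Tensor):
        """active_dims[i] = number of leading embedding dimensions word i may use."""
        super().__init__()
        self.weight = nn.Parameter(torch.randn(vocab_size, embed_dim) * 0.1)
        # Binary mask fixed before training; masked-out entries never contribute.
        mask = torch.zeros(vocab_size, embed_dim)
        for i, k in enumerate(active_dims.tolist()):
            mask[i, :k] = 1.0
        self.register_buffer("mask", mask)

    def forward(self, token_ids: torch.Tensor) -> torch.Tensor:
        # Multiplying by the mask zeroes both the masked weights and their gradients,
        # so only the predefined non-zero positions are ever updated.
        return (self.weight * self.mask)[token_ids]


if __name__ == "__main__":
    vocab_size, embed_dim = 10, 8
    # Example schedule: frequent (low-id) words get all 8 dimensions, rare words fewer.
    active_dims = torch.tensor([8, 8, 8, 6, 6, 4, 4, 2, 2, 2])
    emb = PredefinedSparseEmbedding(vocab_size, embed_dim, active_dims)
    out = emb(torch.tensor([[0, 5, 9]]))
    print(out.shape)                   # torch.Size([1, 3, 8])
    print(int(emb.mask.sum().item()))  # 50 trainable entries instead of 80 dense ones
```

With this example schedule, only 50 of the 80 embedding entries are ever trainable while the layer's interface is unchanged, which is the flavor of saving the abstract describes for sparse word embeddings in sequence labeling.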