@inproceedings{shaham-levy-2022-get,
title = "What Do You Get When You Cross Beam Search with Nucleus Sampling?",
author = "Shaham, Uri and
Levy, Omer",
editor = "Tafreshi, Shabnam and
Sedoc, Jo{\~a}o and
Rogers, Anna and
Drozd, Aleksandr and
Rumshisky, Anna and
Akula, Arjun",
booktitle = "Proceedings of the Third Workshop on Insights from Negative Results in NLP",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.insights-1.5/",
doi = "10.18653/v1/2022.insights-1.5",
pages = "38--45",
abstract = "We combine beam search with the probabilistic pruning technique of nucleus sampling to create two deterministic nucleus search algorithms for natural language generation. The first algorithm, p-exact search, locally prunes the next-token distribution and performs an exact search over the remaining space. The second algorithm, dynamic beam search, shrinks and expands the beam size according to the entropy of the candidate`s probability distribution. Despite the probabilistic intuition behind nucleus search, experiments on machine translation and summarization benchmarks show that both algorithms reach the same performance levels as standard beam search."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="shaham-levy-2022-get">
<titleInfo>
<title>What Do You Get When You Cross Beam Search with Nucleus Sampling?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Uri</namePart>
<namePart type="family">Shaham</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Omer</namePart>
<namePart type="family">Levy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Third Workshop on Insights from Negative Results in NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shabnam</namePart>
<namePart type="family">Tafreshi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">João</namePart>
<namePart type="family">Sedoc</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Rogers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aleksandr</namePart>
<namePart type="family">Drozd</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Rumshisky</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Arjun</namePart>
<namePart type="family">Akula</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We combine beam search with the probabilistic pruning technique of nucleus sampling to create two deterministic nucleus search algorithms for natural language generation. The first algorithm, p-exact search, locally prunes the next-token distribution and performs an exact search over the remaining space. The second algorithm, dynamic beam search, shrinks and expands the beam size according to the entropy of the candidate's probability distribution. Despite the probabilistic intuition behind nucleus search, experiments on machine translation and summarization benchmarks show that both algorithms reach the same performance levels as standard beam search.</abstract>
<identifier type="citekey">shaham-levy-2022-get</identifier>
<identifier type="doi">10.18653/v1/2022.insights-1.5</identifier>
<location>
<url>https://aclanthology.org/2022.insights-1.5/</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>38</start>
<end>45</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T What Do You Get When You Cross Beam Search with Nucleus Sampling?
%A Shaham, Uri
%A Levy, Omer
%Y Tafreshi, Shabnam
%Y Sedoc, João
%Y Rogers, Anna
%Y Drozd, Aleksandr
%Y Rumshisky, Anna
%Y Akula, Arjun
%S Proceedings of the Third Workshop on Insights from Negative Results in NLP
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F shaham-levy-2022-get
%X We combine beam search with the probabilistic pruning technique of nucleus sampling to create two deterministic nucleus search algorithms for natural language generation. The first algorithm, p-exact search, locally prunes the next-token distribution and performs an exact search over the remaining space. The second algorithm, dynamic beam search, shrinks and expands the beam size according to the entropy of the candidate's probability distribution. Despite the probabilistic intuition behind nucleus search, experiments on machine translation and summarization benchmarks show that both algorithms reach the same performance levels as standard beam search.
%R 10.18653/v1/2022.insights-1.5
%U https://aclanthology.org/2022.insights-1.5/
%U https://doi.org/10.18653/v1/2022.insights-1.5
%P 38-45
Markdown (Informal)
[What Do You Get When You Cross Beam Search with Nucleus Sampling?](https://aclanthology.org/2022.insights-1.5/) (Shaham & Levy, insights 2022)
ACL
- Uri Shaham and Omer Levy. 2022. What Do You Get When You Cross Beam Search with Nucleus Sampling?. In Proceedings of the Third Workshop on Insights from Negative Results in NLP, pages 38–45, Dublin, Ireland. Association for Computational Linguistics.
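
As an informal illustration of the pruning technique named in the abstract, the sketch below shows a minimal top-p ("nucleus") truncation of a next-token distribution. This is a hedged sketch, not code from the paper: the function name `nucleus_prune`, the threshold `p`, and the toy probabilities are illustrative assumptions.

```python
import numpy as np

def nucleus_prune(probs, p=0.9):
    # Keep the smallest set of tokens whose cumulative probability
    # exceeds p (the "nucleus") and zero out the remaining tail.
    order = np.argsort(probs)[::-1]          # token indices, most probable first
    cumulative = np.cumsum(probs[order])
    keep = cumulative <= p
    keep[np.argmax(cumulative > p)] = True   # include the token that crosses the threshold
    pruned = np.zeros_like(probs)
    pruned[order[keep]] = probs[order[keep]]
    return pruned / pruned.sum()             # renormalize the surviving mass

# Toy example: with p=0.9 the lowest-probability token falls outside the nucleus.
probs = np.array([0.5, 0.3, 0.15, 0.05])
print(nucleus_prune(probs))                  # -> approx. [0.526, 0.316, 0.158, 0.]
```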