@inproceedings{kreutzer-etal-2021-offline,
  author    = {Kreutzer, Julia and
               Riezler, Stefan and
               Lawrence, Carolin},
  title     = {Offline Reinforcement Learning from Human Feedback in Real-World Sequence-to-Sequence Tasks},
  editor    = {Kozareva, Zornitsa and
               Ravi, Sujith and
               Vlachos, Andreas and
               Agrawal, Priyanka and
               Martins, Andr{\'e}},
  booktitle = {Proceedings of the 5th Workshop on Structured Prediction for {NLP} ({SPNLP} 2021)},
  month     = aug,
  year      = {2021},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  pages     = {37--43},
  doi       = {10.18653/v1/2021.spnlp-1.4},
  url       = {https://aclanthology.org/2021.spnlp-1.4/},
  abstract  = {Large volumes of interaction logs can be collected from NLP systems that are deployed in the real world. How can this wealth of information be leveraged? Using such interaction logs in an offline reinforcement learning (RL) setting is a promising approach. However, due to the nature of NLP tasks and the constraints of production systems, a series of challenges arise. We present a concise overview of these challenges and discuss possible solutions.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kreutzer-etal-2021-offline">
<titleInfo>
<title>Offline Reinforcement Learning from Human Feedback in Real-World Sequence-to-Sequence Tasks</title>
</titleInfo>
<name type="personal">
<namePart type="given">Julia</namePart>
<namePart type="family">Kreutzer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stefan</namePart>
<namePart type="family">Riezler</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolin</namePart>
<namePart type="family">Lawrence</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 5th Workshop on Structured Prediction for NLP (SPNLP 2021)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zornitsa</namePart>
<namePart type="family">Kozareva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sujith</namePart>
<namePart type="family">Ravi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andreas</namePart>
<namePart type="family">Vlachos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Priyanka</namePart>
<namePart type="family">Agrawal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">André</namePart>
<namePart type="family">Martins</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Large volumes of interaction logs can be collected from NLP systems that are deployed in the real world. How can this wealth of information be leveraged? Using such interaction logs in an offline reinforcement learning (RL) setting is a promising approach. However, due to the nature of NLP tasks and the constraints of production systems, a series of challenges arise. We present a concise overview of these challenges and discuss possible solutions.</abstract>
<identifier type="citekey">kreutzer-etal-2021-offline</identifier>
<identifier type="doi">10.18653/v1/2021.spnlp-1.4</identifier>
<location>
<url>https://aclanthology.org/2021.spnlp-1.4/</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>37</start>
<end>43</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Offline Reinforcement Learning from Human Feedback in Real-World Sequence-to-Sequence Tasks
%A Kreutzer, Julia
%A Riezler, Stefan
%A Lawrence, Carolin
%Y Kozareva, Zornitsa
%Y Ravi, Sujith
%Y Vlachos, Andreas
%Y Agrawal, Priyanka
%Y Martins, André
%S Proceedings of the 5th Workshop on Structured Prediction for NLP (SPNLP 2021)
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F kreutzer-etal-2021-offline
%X Large volumes of interaction logs can be collected from NLP systems that are deployed in the real world. How can this wealth of information be leveraged? Using such interaction logs in an offline reinforcement learning (RL) setting is a promising approach. However, due to the nature of NLP tasks and the constraints of production systems, a series of challenges arise. We present a concise overview of these challenges and discuss possible solutions.
%R 10.18653/v1/2021.spnlp-1.4
%U https://aclanthology.org/2021.spnlp-1.4/
%U https://doi.org/10.18653/v1/2021.spnlp-1.4
%P 37-43
Markdown (Informal)
[Offline Reinforcement Learning from Human Feedback in Real-World Sequence-to-Sequence Tasks](https://aclanthology.org/2021.spnlp-1.4/) (Kreutzer et al., spnlp 2021)
ACL