@inproceedings{gao-etal-2023-continually,
title = "Continually Improving Extractive {QA} via Human Feedback",
author = "Gao, Ge and
Chen, Hung-Ting and
Artzi, Yoav and
Choi, Eunsol",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.emnlp-main.27",
doi = "10.18653/v1/2023.emnlp-main.27",
pages = "406--423",
abstract = "We study continually improving an extractive question answering (QA) system via human user feedback. We design and deploy an iterative approach, where information-seeking users ask questions, receive model-predicted answers, and provide feedback. We conduct experiments involving thousands of user interactions under diverse setups to broaden the understanding of learning from feedback over time. Our experiments show effective improvement from user feedback of extractive QA models over time across different data regimes, including significant potential for domain adaptation.",
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="gao-etal-2023-continually">
<titleInfo>
<title>Continually Improving Extractive QA via Human Feedback</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ge</namePart>
<namePart type="family">Gao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hung-Ting</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yoav</namePart>
<namePart type="family">Artzi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eunsol</namePart>
<namePart type="family">Choi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Pino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kalika</namePart>
<namePart type="family">Bali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We study continually improving an extractive question answering (QA) system via human user feedback. We design and deploy an iterative approach, where information-seeking users ask questions, receive model-predicted answers, and provide feedback. We conduct experiments involving thousands of user interactions under diverse setups to broaden the understanding of learning from feedback over time. Our experiments show effective improvement from user feedback of extractive QA models over time across different data regimes, including significant potential for domain adaptation.</abstract>
<identifier type="citekey">gao-etal-2023-continually</identifier>
<identifier type="doi">10.18653/v1/2023.emnlp-main.27</identifier>
<location>
<url>https://aclanthology.org/2023.emnlp-main.27</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>406</start>
<end>423</end>
</extent>
</part>
</mods>
</modsCollection>

%0 Conference Proceedings
%T Continually Improving Extractive QA via Human Feedback
%A Gao, Ge
%A Chen, Hung-Ting
%A Artzi, Yoav
%A Choi, Eunsol
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F gao-etal-2023-continually
%X We study continually improving an extractive question answering (QA) system via human user feedback. We design and deploy an iterative approach, where information-seeking users ask questions, receive model-predicted answers, and provide feedback. We conduct experiments involving thousands of user interactions under diverse setups to broaden the understanding of learning from feedback over time. Our experiments show effective improvement from user feedback of extractive QA models over time across different data regimes, including significant potential for domain adaptation.
%R 10.18653/v1/2023.emnlp-main.27
%U https://aclanthology.org/2023.emnlp-main.27
%U https://doi.org/10.18653/v1/2023.emnlp-main.27
%P 406-423

Markdown (Informal)
[Continually Improving Extractive QA via Human Feedback](https://aclanthology.org/2023.emnlp-main.27) (Gao et al., EMNLP 2023)

ACL
Ge Gao, Hung-Ting Chen, Yoav Artzi, and Eunsol Choi. 2023. Continually Improving Extractive QA via Human Feedback. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 406–423, Singapore. Association for Computational Linguistics.
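
A minimal Python sketch of the deploy–collect–update loop the abstract describes: users ask questions, receive model-predicted answer spans, and give feedback, which is used to improve the model over successive rounds. Everything concrete here (the `Interaction` fields, the feedback labels, the reward mapping, and the `model.predict` / `model.update` interface) is an illustrative assumption, not the authors' implementation; see the paper at the URL above for the actual method.

```python
# Illustrative sketch only; model interface, feedback labels, and reward
# mapping are assumptions, not the paper's implementation.
from dataclasses import dataclass
from typing import Callable, List, Tuple


@dataclass
class Interaction:
    question: str
    context: str
    predicted_span: str
    feedback: str  # assumed label set: "correct" / "partially_correct" / "wrong"


def feedback_to_reward(feedback: str) -> float:
    """Hypothetical mapping from categorical user feedback to a scalar reward."""
    return {"correct": 1.0, "partially_correct": 0.5, "wrong": -0.1}.get(feedback, 0.0)


def deployment_round(model, users: List[Tuple[Callable, Callable]], corpus) -> List[Interaction]:
    """Deploy the current model and collect one round of user interactions."""
    interactions = []
    for ask_question, give_feedback in users:
        question, context = ask_question(corpus)   # user poses an information-seeking question
        span = model.predict(question, context)    # model returns an extractive answer span
        interactions.append(
            Interaction(question, context, span, give_feedback(question, span))
        )
    return interactions


def continual_improvement(model, users, corpus, num_rounds: int):
    """Iterate: deploy, collect user feedback, update the model, redeploy."""
    for _ in range(num_rounds):
        interactions = deployment_round(model, users, corpus)
        rewards = [feedback_to_reward(i.feedback) for i in interactions]
        # Placeholder update step, e.g. reward-weighted fine-tuning on the
        # collected (question, context, span) triples; not the paper's exact objective.
        model.update(interactions, rewards)
    return model
```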