@inproceedings{liu-etal-2024-se2,
title = "$Se^2$: Sequential Example Selection for In-Context Learning",
author = "Liu, Haoyu and
Liu, Jianfeng and
Huang, Shaohan and
Zhan, Yuefeng and
Sun, Hao and
Deng, Weiwei and
Wei, Furu and
Zhang, Qi",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.findings-acl.312",
doi = "10.18653/v1/2024.findings-acl.312",
pages = "5262--5284",
abstract = "The remarkable capability of large language models(LLMs) for in-context learning(ICL) needs to be activated by demonstration examples. Prior work has extensively explored the selection of examples for ICL, predominantly following the {``}select then organize{''} paradigm, such approaches often neglect the internal relationships between examples and exist an inconsistency between the training and inference. In this paper, we formulate the problem as a $Se$quential $Se$lection problem and introduce $Se^2$, a sequential-aware method that leverages the LLM{'}s feedback on varying context, aiding in capturing inter-relationships and sequential information among examples, significantly enriching the contextuality and relevance of ICL prompts. Meanwhile, we utilize beam search to seek and construct example sequences, enhancing both quality and diversity. Extensive experiments across 23 NLP tasks from 8 distinct categories illustrate that $Se^2$ markedly surpasses competitive baselines and achieves 42{\%} relative improvement over random selection. Further in-depth analysis shows the effectiveness of proposed strategies, highlighting $Se^2${`}s exceptional stability and adaptability across various scenarios. Code available at https://github.com/microsoft/LMOps.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="liu-etal-2024-se2">
    <titleInfo>
        <title>Se²: Sequential Example Selection for In-Context Learning</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Haoyu</namePart>
        <namePart type="family">Liu</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Jianfeng</namePart>
        <namePart type="family">Liu</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Shaohan</namePart>
        <namePart type="family">Huang</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Yuefeng</namePart>
        <namePart type="family">Zhan</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Hao</namePart>
        <namePart type="family">Sun</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Weiwei</namePart>
        <namePart type="family">Deng</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Furu</namePart>
        <namePart type="family">Wei</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Qi</namePart>
        <namePart type="family">Zhang</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2024-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Findings of the Association for Computational Linguistics: ACL 2024</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Lun-Wei</namePart>
            <namePart type="family">Ku</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Andre</namePart>
            <namePart type="family">Martins</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Vivek</namePart>
            <namePart type="family">Srikumar</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Bangkok, Thailand</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The remarkable capability of large language models (LLMs) for in-context learning (ICL) needs to be activated by demonstration examples. Prior work has extensively explored the selection of examples for ICL, predominantly following the “select then organize” paradigm. Such approaches often neglect the internal relationships between examples and exhibit an inconsistency between training and inference. In this paper, we formulate the problem as a Sequential Selection problem and introduce Se², a sequential-aware method that leverages the LLM’s feedback on varying contexts, aiding in capturing inter-relationships and sequential information among examples and significantly enriching the contextuality and relevance of ICL prompts. Meanwhile, we utilize beam search to seek and construct example sequences, enhancing both quality and diversity. Extensive experiments across 23 NLP tasks from 8 distinct categories illustrate that Se² markedly surpasses competitive baselines and achieves a 42% relative improvement over random selection. Further in-depth analysis shows the effectiveness of the proposed strategies, highlighting Se²’s exceptional stability and adaptability across various scenarios. Code available at https://github.com/microsoft/LMOps.</abstract>
<identifier type="citekey">liu-etal-2024-se2</identifier>
<identifier type="doi">10.18653/v1/2024.findings-acl.312</identifier>
<location>
<url>https://aclanthology.org/2024.findings-acl.312</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>5262</start>
<end>5284</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Se²: Sequential Example Selection for In-Context Learning
%A Liu, Haoyu
%A Liu, Jianfeng
%A Huang, Shaohan
%A Zhan, Yuefeng
%A Sun, Hao
%A Deng, Weiwei
%A Wei, Furu
%A Zhang, Qi
%Y Ku, Lun-Wei
%Y Martins, Andre
%Y Srikumar, Vivek
%S Findings of the Association for Computational Linguistics: ACL 2024
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F liu-etal-2024-se2
%X The remarkable capability of large language models (LLMs) for in-context learning (ICL) needs to be activated by demonstration examples. Prior work has extensively explored the selection of examples for ICL, predominantly following the “select then organize” paradigm. Such approaches often neglect the internal relationships between examples and exhibit an inconsistency between training and inference. In this paper, we formulate the problem as a Sequential Selection problem and introduce Se², a sequential-aware method that leverages the LLM’s feedback on varying contexts, aiding in capturing inter-relationships and sequential information among examples and significantly enriching the contextuality and relevance of ICL prompts. Meanwhile, we utilize beam search to seek and construct example sequences, enhancing both quality and diversity. Extensive experiments across 23 NLP tasks from 8 distinct categories illustrate that Se² markedly surpasses competitive baselines and achieves a 42% relative improvement over random selection. Further in-depth analysis shows the effectiveness of the proposed strategies, highlighting Se²’s exceptional stability and adaptability across various scenarios. Code available at https://github.com/microsoft/LMOps.
%R 10.18653/v1/2024.findings-acl.312
%U https://aclanthology.org/2024.findings-acl.312
%U https://doi.org/10.18653/v1/2024.findings-acl.312
%P 5262-5284
Markdown (Informal)
[Se²: Sequential Example Selection for In-Context Learning](https://aclanthology.org/2024.findings-acl.312) (Liu et al., Findings 2024)
ACL
Haoyu Liu, Jianfeng Liu, Shaohan Huang, Yuefeng Zhan, Hao Sun, Weiwei Deng, Furu Wei, and Qi Zhang. 2024. Se²: Sequential Example Selection for In-Context Learning. In Findings of the Association for Computational Linguistics: ACL 2024, pages 5262–5284, Bangkok, Thailand. Association for Computational Linguistics.
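
The abstract frames example selection as building an *ordered* sequence of demonstrations with beam search. Below is a minimal sketch of that search procedure only, under assumed names (`beam_search_examples`, `toy_score`, and the `ex_*` pool are all illustrative); the scorer is a toy stand-in for the LLM-feedback signal the paper actually uses, and the real Se² implementation lives at https://github.com/microsoft/LMOps.

```python
# Sketch of beam search over ordered example sequences, in the spirit of the
# abstract's description. All names here are hypothetical; Se^2 itself scores
# candidate sequences with LLM feedback (see microsoft/LMOps).
from typing import Callable, Sequence


def beam_search_examples(
    candidates: Sequence[str],
    score: Callable[[tuple[str, ...]], float],  # higher = better prompt prefix
    seq_len: int = 4,
    beam_width: int = 3,
) -> tuple[str, ...]:
    """Extend the beam_width best-scoring sequences one example at a time."""
    beams: list[tuple[float, tuple[str, ...]]] = [(0.0, ())]
    for _ in range(seq_len):
        expanded = [
            (score(seq + (c,)), seq + (c,))
            for _, seq in beams
            for c in candidates
            if c not in seq  # use each demonstration at most once
        ]
        beams = sorted(expanded, key=lambda b: b[0], reverse=True)[:beam_width]
    return beams[0][1]


# Toy order-sensitive scorer: per-example utility plus a bonus for one specific
# adjacency, so sequence order (not just set membership) affects the score.
utility = {"ex_a": 0.9, "ex_b": 0.7, "ex_c": 0.5, "ex_d": 0.3}


def toy_score(seq: tuple[str, ...]) -> float:
    bonus = sum(0.5 for a, b in zip(seq, seq[1:]) if (a, b) == ("ex_c", "ex_a"))
    return sum(utility[e] for e in seq) + bonus


print(beam_search_examples(list(utility), toy_score, seq_len=3))
# -> ('ex_c', 'ex_a', 'ex_b')
```

With `beam_width=1` this collapses to greedy selection, which here would start with `ex_a` and miss the higher-scoring `ex_c → ex_a` prefix; keeping several hypotheses alive is what lets a sequential formulation exploit inter-example relationships.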