@inproceedings{thota-nilizadeh-2024-attacks,
title = "Attacks against Abstractive Text Summarization Models through Lead Bias and Influence Functions",
author = "Thota, Poojitha and
Nilizadeh, Shirin",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.findings-emnlp.804/",
doi = "10.18653/v1/2024.findings-emnlp.804",
pages = "13727--13741",
abstract = "Large Language Models (LLMs) have introduced novel opportunities for text comprehension and generation. Yet, they are vulnerable to adversarial perturbations and data poisoning attacks, particularly in tasks like text classification and translation. However, the adversarial robustness of abstractive text summarization models remains less explored. In this work, we unveil a novel approach by exploiting the inherent lead bias in summarization models, to perform adversarial perturbations. Furthermore, we introduce an innovative application of influence functions, to execute data poisoning, which compromises the model`s integrity. This approach not only shows a skew in the models' behavior to produce desired outcomes but also shows a new behavioral change, where models under attack tend to generate extractive summaries rather than abstractive summaries."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="thota-nilizadeh-2024-attacks">
<titleInfo>
<title>Attacks against Abstractive Text Summarization Models through Lead Bias and Influence Functions</title>
</titleInfo>
<name type="personal">
<namePart type="given">Poojitha</namePart>
<namePart type="family">Thota</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shirin</namePart>
<namePart type="family">Nilizadeh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yaser</namePart>
<namePart type="family">Al-Onaizan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohit</namePart>
<namePart type="family">Bansal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Large Language Models (LLMs) have introduced novel opportunities for text comprehension and generation. Yet, they are vulnerable to adversarial perturbations and data poisoning attacks, particularly in tasks like text classification and translation. However, the adversarial robustness of abstractive text summarization models remains less explored. In this work, we unveil a novel approach by exploiting the inherent lead bias in summarization models, to perform adversarial perturbations. Furthermore, we introduce an innovative application of influence functions, to execute data poisoning, which compromises the model‘s integrity. This approach not only shows a skew in the models’ behavior to produce desired outcomes but also shows a new behavioral change, where models under attack tend to generate extractive summaries rather than abstractive summaries.</abstract>
<identifier type="citekey">thota-nilizadeh-2024-attacks</identifier>
<identifier type="doi">10.18653/v1/2024.findings-emnlp.804</identifier>
<location>
<url>https://aclanthology.org/2024.findings-emnlp.804/</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>13727</start>
<end>13741</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Attacks against Abstractive Text Summarization Models through Lead Bias and Influence Functions
%A Thota, Poojitha
%A Nilizadeh, Shirin
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Findings of the Association for Computational Linguistics: EMNLP 2024
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F thota-nilizadeh-2024-attacks
%X Large Language Models (LLMs) have introduced novel opportunities for text comprehension and generation. Yet, they are vulnerable to adversarial perturbations and data poisoning attacks, particularly in tasks like text classification and translation. However, the adversarial robustness of abstractive text summarization models remains less explored. In this work, we unveil a novel approach by exploiting the inherent lead bias in summarization models, to perform adversarial perturbations. Furthermore, we introduce an innovative application of influence functions, to execute data poisoning, which compromises the model’s integrity. This approach not only shows a skew in the models’ behavior to produce desired outcomes but also shows a new behavioral change, where models under attack tend to generate extractive summaries rather than abstractive summaries.
%R 10.18653/v1/2024.findings-emnlp.804
%U https://aclanthology.org/2024.findings-emnlp.804/
%U https://doi.org/10.18653/v1/2024.findings-emnlp.804
%P 13727-13741
Markdown (Informal)
[Attacks against Abstractive Text Summarization Models through Lead Bias and Influence Functions](https://aclanthology.org/2024.findings-emnlp.804/) (Thota & Nilizadeh, Findings 2024)
ACL
Poojitha Thota and Shirin Nilizadeh. 2024. [Attacks against Abstractive Text Summarization Models through Lead Bias and Influence Functions](https://aclanthology.org/2024.findings-emnlp.804/). In *Findings of the Association for Computational Linguistics: EMNLP 2024*, pages 13727–13741, Miami, Florida, USA. Association for Computational Linguistics.