@inproceedings{yu-etal-2024-self,
title = "Self-Modifying State Modeling for Simultaneous Machine Translation",
author = "Yu, Donglei and
Kang, Xiaomian and
Liu, Yuchen and
Zhou, Yu and
Zong, Chengqing",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.acl-long.528/",
doi = "10.18653/v1/2024.acl-long.528",
pages = "9781--9795",
abstract = "Simultaneous Machine Translation (SiMT) generates target outputs while receiving stream source inputs and requires a read/write policy to decide whether to wait for the next source token or generate a new target token, whose decisions form a decision path. Existing SiMT methods, which learn the policy by exploring various decision paths in training, face inherent limitations. These methods not only fail to precisely optimize the policy due to the inability to accurately assess the individual impact of each decision on SiMT performance, but also cannot sufficiently explore all potential paths because of their vast number. Besides, building decision paths requires unidirectional encoders to simulate streaming source inputs, which impairs the translation quality of SiMT models. To solve these issues, we propose Self-Modifying State Modeling (SM$^2$), a novel training paradigm for SiMT task. Without building decision paths, SM$^2$ individually optimizes decisions at each state during training. To precisely optimize the policy, SM$^2$ introduces Self-Modifying process to independently assess and adjust decisions at each state. For sufficient exploration, SM$^2$ proposes Prefix Sampling to efficiently traverse all potential states. Moreover, SM$^2$ ensures compatibility with bidirectional encoders, thus achieving higher translation quality. Experiments show that SM$^2$ outperforms strong baselines. Furthermore, SM$^2$ allows offline machine translation models to acquire SiMT ability with fine-tuning."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="yu-etal-2024-self">
    <titleInfo>
      <title>Self-Modifying State Modeling for Simultaneous Machine Translation</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Donglei</namePart>
      <namePart type="family">Yu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Xiaomian</namePart>
      <namePart type="family">Kang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yuchen</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yu</namePart>
      <namePart type="family">Zhou</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Chengqing</namePart>
      <namePart type="family">Zong</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Lun-Wei</namePart>
        <namePart type="family">Ku</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Andre</namePart>
        <namePart type="family">Martins</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Vivek</namePart>
        <namePart type="family">Srikumar</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Bangkok, Thailand</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Simultaneous Machine Translation (SiMT) generates target outputs while receiving streaming source inputs and requires a read/write policy to decide whether to wait for the next source token or generate a new target token; these decisions form a decision path. Existing SiMT methods, which learn the policy by exploring various decision paths during training, face inherent limitations: they fail to precisely optimize the policy because they cannot accurately assess the individual impact of each decision on SiMT performance, and they cannot sufficiently explore all potential paths because of their vast number. In addition, building decision paths requires unidirectional encoders to simulate streaming source inputs, which impairs the translation quality of SiMT models. To solve these issues, we propose Self-Modifying State Modeling (SM²), a novel training paradigm for the SiMT task. Without building decision paths, SM² individually optimizes decisions at each state during training. To precisely optimize the policy, SM² introduces a Self-Modifying process to independently assess and adjust the decision at each state. For sufficient exploration, SM² proposes Prefix Sampling to efficiently traverse all potential states. Moreover, SM² ensures compatibility with bidirectional encoders, thus achieving higher translation quality. Experiments show that SM² outperforms strong baselines. Furthermore, SM² allows offline machine translation models to acquire SiMT ability through fine-tuning.</abstract>
<identifier type="citekey">yu-etal-2024-self</identifier>
<identifier type="doi">10.18653/v1/2024.acl-long.528</identifier>
<location>
<url>https://aclanthology.org/2024.acl-long.528/</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>9781</start>
<end>9795</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Self-Modifying State Modeling for Simultaneous Machine Translation
%A Yu, Donglei
%A Kang, Xiaomian
%A Liu, Yuchen
%A Zhou, Yu
%A Zong, Chengqing
%Y Ku, Lun-Wei
%Y Martins, Andre
%Y Srikumar, Vivek
%S Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F yu-etal-2024-self
%X Simultaneous Machine Translation (SiMT) generates target outputs while receiving streaming source inputs and requires a read/write policy to decide whether to wait for the next source token or generate a new target token; these decisions form a decision path. Existing SiMT methods, which learn the policy by exploring various decision paths during training, face inherent limitations: they fail to precisely optimize the policy because they cannot accurately assess the individual impact of each decision on SiMT performance, and they cannot sufficiently explore all potential paths because of their vast number. In addition, building decision paths requires unidirectional encoders to simulate streaming source inputs, which impairs the translation quality of SiMT models. To solve these issues, we propose Self-Modifying State Modeling (SM²), a novel training paradigm for the SiMT task. Without building decision paths, SM² individually optimizes decisions at each state during training. To precisely optimize the policy, SM² introduces a Self-Modifying process to independently assess and adjust the decision at each state. For sufficient exploration, SM² proposes Prefix Sampling to efficiently traverse all potential states. Moreover, SM² ensures compatibility with bidirectional encoders, thus achieving higher translation quality. Experiments show that SM² outperforms strong baselines. Furthermore, SM² allows offline machine translation models to acquire SiMT ability through fine-tuning.
%R 10.18653/v1/2024.acl-long.528
%U https://aclanthology.org/2024.acl-long.528/
%U https://doi.org/10.18653/v1/2024.acl-long.528
%P 9781-9795
Markdown (Informal)
[Self-Modifying State Modeling for Simultaneous Machine Translation](https://aclanthology.org/2024.acl-long.528/) (Yu et al., ACL 2024)
ACL
Donglei Yu, Xiaomian Kang, Yuchen Liu, Yu Zhou, and Chengqing Zong. 2024. Self-Modifying State Modeling for Simultaneous Machine Translation. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9781–9795, Bangkok, Thailand. Association for Computational Linguistics.
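
The read/write policy the abstract refers to alternates between consuming a source token (READ) and emitting a target token (WRITE). For orientation only, below is a minimal sketch of the classic wait-k read/write loop, a standard SiMT baseline that predates this paper; it is not the SM² training procedure. The `simultaneous_decode` function and the `translate_prefix` callback are hypothetical names standing in for any model that maps a (source prefix, target prefix) pair to the next target token.

```python
# Minimal sketch of a SiMT read/write loop with a wait-k policy --
# a common SiMT baseline, NOT the SM^2 method described in the paper.
# `translate_prefix` is a hypothetical stand-in for any prefix-to-token model.
from typing import Callable, List

def simultaneous_decode(
    source_stream: List[str],
    translate_prefix: Callable[[List[str], List[str]], str],
    k: int = 3,
    max_target_len: int = 100,
) -> List[str]:
    read, target = 0, []
    while len(target) < max_target_len:
        # READ: wait for the next source token while fewer than
        # len(target) + k source tokens have arrived.
        if read < len(source_stream) and read < len(target) + k:
            read += 1
        else:
            # WRITE: commit a target token conditioned on the source prefix.
            token = translate_prefix(source_stream[:read], target)
            if token == "<eos>":
                break
            target.append(token)
    return target

# Toy usage: an identity "model" that copies the next unseen source token.
echo = lambda src, tgt: src[len(tgt)] if len(tgt) < len(src) else "<eos>"
print(simultaneous_decode("wie geht es dir".split(), echo, k=2))
# -> ['wie', 'geht', 'es', 'dir']
```

In this framing, each (source prefix, target prefix) pair is a state, and a full run of the loop traces one decision path; the abstract's point is that SM² optimizes the READ/WRITE decision at each state individually rather than by exploring such paths end to end.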