@inproceedings{miaschi-dellorletta-2020-contextual,
    title = "Contextual and Non-Contextual Word Embeddings: an in-depth Linguistic Investigation",
    author = "Miaschi, Alessio and
      Dell{'}Orletta, Felice",
    editor = "Gella, Spandana and
      Welbl, Johannes and
      Rei, Marek and
      Petroni, Fabio and
      Lewis, Patrick and
      Strubell, Emma and
      Seo, Minjoon and
      Hajishirzi, Hannaneh",
    booktitle = "Proceedings of the 5th Workshop on Representation Learning for NLP",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.repl4nlp-1.15",
    doi = "10.18653/v1/2020.repl4nlp-1.15",
    pages = "110--119",
    abstract = "In this paper we present a comparison between the linguistic knowledge encoded in the internal representations of a contextual Language Model (BERT) and a contextual-independent one (Word2vec). We use a wide set of probing tasks, each of which corresponds to a distinct sentence-level feature extracted from different levels of linguistic annotation. We show that, although BERT is capable of understanding the full context of each word in an input sequence, the implicit knowledge encoded in its aggregated sentence representations is still comparable to that of a contextual-independent model. We also find that BERT is able to encode sentence-level properties even within single-word embeddings, obtaining comparable or even superior results than those obtained with sentence representations.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="miaschi-dellorletta-2020-contextual">
    <titleInfo>
      <title>Contextual and Non-Contextual Word Embeddings: an in-depth Linguistic Investigation</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Alessio</namePart>
      <namePart type="family">Miaschi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Felice</namePart>
      <namePart type="family">Dell’Orletta</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 5th Workshop on Representation Learning for NLP</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Spandana</namePart>
        <namePart type="family">Gella</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Johannes</namePart>
        <namePart type="family">Welbl</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Marek</namePart>
        <namePart type="family">Rei</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Fabio</namePart>
        <namePart type="family">Petroni</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Patrick</namePart>
        <namePart type="family">Lewis</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Emma</namePart>
        <namePart type="family">Strubell</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Minjoon</namePart>
        <namePart type="family">Seo</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Hannaneh</namePart>
        <namePart type="family">Hajishirzi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In this paper we present a comparison between the linguistic knowledge encoded in the internal representations of a contextual Language Model (BERT) and a contextual-independent one (Word2vec). We use a wide set of probing tasks, each of which corresponds to a distinct sentence-level feature extracted from different levels of linguistic annotation. We show that, although BERT is capable of understanding the full context of each word in an input sequence, the implicit knowledge encoded in its aggregated sentence representations is still comparable to that of a contextual-independent model. We also find that BERT is able to encode sentence-level properties even within single-word embeddings, obtaining comparable or even superior results than those obtained with sentence representations.</abstract>
    <identifier type="citekey">miaschi-dellorletta-2020-contextual</identifier>
    <identifier type="doi">10.18653/v1/2020.repl4nlp-1.15</identifier>
    <location>
      <url>https://aclanthology.org/2020.repl4nlp-1.15</url>
    </location>
    <part>
      <date>2020-07</date>
      <extent unit="page">
        <start>110</start>
        <end>119</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Contextual and Non-Contextual Word Embeddings: an in-depth Linguistic Investigation
%A Miaschi, Alessio
%A Dell’Orletta, Felice
%Y Gella, Spandana
%Y Welbl, Johannes
%Y Rei, Marek
%Y Petroni, Fabio
%Y Lewis, Patrick
%Y Strubell, Emma
%Y Seo, Minjoon
%Y Hajishirzi, Hannaneh
%S Proceedings of the 5th Workshop on Representation Learning for NLP
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F miaschi-dellorletta-2020-contextual
%X In this paper we present a comparison between the linguistic knowledge encoded in the internal representations of a contextual Language Model (BERT) and a contextual-independent one (Word2vec). We use a wide set of probing tasks, each of which corresponds to a distinct sentence-level feature extracted from different levels of linguistic annotation. We show that, although BERT is capable of understanding the full context of each word in an input sequence, the implicit knowledge encoded in its aggregated sentence representations is still comparable to that of a contextual-independent model. We also find that BERT is able to encode sentence-level properties even within single-word embeddings, obtaining comparable or even superior results than those obtained with sentence representations.
%R 10.18653/v1/2020.repl4nlp-1.15
%U https://aclanthology.org/2020.repl4nlp-1.15
%U https://doi.org/10.18653/v1/2020.repl4nlp-1.15
%P 110-119
Markdown (Informal)

[Contextual and Non-Contextual Word Embeddings: an in-depth Linguistic Investigation](https://aclanthology.org/2020.repl4nlp-1.15) (Miaschi & Dell’Orletta, RepL4NLP 2020)
ACL

Alessio Miaschi and Felice Dell’Orletta. 2020. Contextual and Non-Contextual Word Embeddings: an in-depth Linguistic Investigation. In Proceedings of the 5th Workshop on Representation Learning for NLP, pages 110–119, Online. Association for Computational Linguistics.