@comment{Source: ACL Anthology, 2022.blackboxnlp-1.7.bib (Proceedings of the Fifth BlackboxNLP Workshop).}
@inproceedings{de-bruyn-etal-2022-smaller,
    title = {Is It Smaller Than a Tennis Ball? Language Models Play the Game of {Twenty Questions}},
    author = {De Bruyn, Maxime and
      Lotfi, Ehsan and
      Buhmann, Jeska and
      Daelemans, Walter},
    booktitle = {Proceedings of the Fifth {BlackboxNLP} Workshop on Analyzing and Interpreting Neural Networks for {NLP}},
    month = dec,
    year = {2022},
    address = {Abu Dhabi, United Arab Emirates (Hybrid)},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/2022.blackboxnlp-1.7},
    pages = {80--90},
    abstract = {Researchers often use games to analyze the abilities of Artificial Intelligence models. In this work, we use the game of Twenty Questions to study the world knowledge of language models. Despite its simplicity for humans, this game requires a broad knowledge of the world to answer yes/no questions. We evaluate several language models on this task and find that only the largest model has enough world knowledge to play it well, although it still has difficulties with the shape and size of objects. We also present a new method to improve the knowledge of smaller models by leveraging external information from the web. Finally, we release our dataset and Twentle, a website to interactively test the knowledge of language models by playing Twenty Questions.},
}