@inproceedings{254c6ecef13b4f8289df974830b9841c,
  author    = {Ansch{\"u}tz, Miriam and Mosca, Edoardo and Groh, Georg},
  title     = {Simpler becomes Harder: Do {LLMs} Exhibit a Coherent Behavior on Simplified Corpora?},
  booktitle = {DeTermIt! Evaluating Text Difficulty in a Multilingual Context, DeTermIt! 2024 at LREC-COLING 2024 - Workshop Proceedings},
  series    = {DeTermIt! Evaluating Text Difficulty in a Multilingual Context, DeTermIt! 2024 at LREC-COLING 2024 - Workshop Proceedings},
  editor    = {{Di Nunzio}, Giorgio Maria and Vezzani, Federica and Ermakova, Liana and Azarbonyad, Hosein and Kamps, Jaap},
  publisher = {European Language Resources Association (ELRA)},
  pages     = {185--195},
  year      = {2024},
  language  = {English},
  keywords  = {model consistency, model robustness, text simplification},
  abstract  = {Text simplification seeks to improve readability while retaining the original content and meaning. Our study investigates whether pre-trained classifiers also maintain such coherence by comparing their predictions on both original and simplified inputs. We conduct experiments using 11 pre-trained models, including BERT and OpenAI{\textquoteright}s GPT 3.5, across six datasets spanning three languages. Additionally, we conduct a detailed analysis of the correlation between prediction change rates and simplification types/strengths. Our findings reveal alarming inconsistencies across all languages and models. If not promptly addressed, simplified inputs can be easily exploited to craft zero-iteration model-agnostic adversarial attacks with success rates of up to 50\%.},
  note      = {Publisher Copyright: {\textcopyright} 2024 ELRA Language Resource Association.; 1st DeTermIt! Evaluating Text Difficulty in a Multilingual Context, DeTermIt! 2024 ; Conference date: 21-05-2024},
}