% NOTE(review): forthcoming (ICAIL 2026) — add pages, publisher, and doi once the
% proceedings are published.
@inproceedings{Suttle2026,
  author    = {Suttle, Oisin and Lillis, David},
  title     = {Persuadability and {LLMs} as Legal Decision Tools},
  booktitle = {Proceedings of the 21st International Conference on Artificial Intelligence and Law ({ICAIL} 2026)},
  year      = {2026},
  month     = jun,
  address   = {Singapore},
  abstract  = {As Large Language Models (LLMs) are proposed as legal decision assistants, and even first-instance decision-makers, across a range of judicial and administrative contexts, it becomes essential to explore how they answer legal questions, and in particular the factors that lead them to decide difficult questions in one way or another. A specific feature of legal decisions is the need to respond to arguments advanced by contending parties. A legal decision-maker must be able to engage with, and respond to, including through being potentially persuaded by, arguments advanced by the parties. Conversely, they should not be unduly persuadable, influenced by a particularly compelling advocate to decide cases based on the skills of the advocates, rather than the merits of the case. We explore how frontier open- and closed-weights LLMs respond to legal arguments, reporting original experimental results examining how the quality of the advocate making those arguments affects the likelihood that a model will agree with a particular legal point of view, and exploring the factors driving these results. Our results have implications for the feasibility of adopting LLMs across legal and administrative settings.},
}