@incollection{Wang2022,
  author    = {Wang, Congcong and Nulty, Paul and Lillis, David},
  title     = {Using Pseudo-Labelled Data for Zero-Shot Text Classification},
  editor    = {Rosso, Paolo and Basile, Valerio and Mart{\'i}nez, Raquel and M{\'e}tais, Elisabeth and Meziane, Farid},
  booktitle = {Natural Language Processing and Information Systems},
  year      = {2022},
  pages     = {35--46},
  publisher = {Springer International Publishing},
  address   = {Cham},
  doi       = {10.1007/978-3-031-08473-7_4},
  isbn      = {978-3-031-08473-7},
  abstract  = {Existing Zero-Shot Learning (ZSL) techniques for text classification typically assign a label to a piece of text by building a matching model to capture the semantic similarity between the text and the label descriptor. This is expensive at inference time as it requires the text paired with every label to be passed forward through the matching model. The existing approaches to alleviate this issue are based on exact-word matching between the label surface names and an unlabelled target-domain corpus to get pseudo-labelled data for model training, making them difficult to generalise to ZS classification in multiple domains. In this paper, we propose an approach called P-ZSC to leverage pseudo-labelled data for zero-shot text classification. Our approach generates the pseudo-labelled data through a matching algorithm between the unlabelled target-domain corpus and the label vocabularies that consist of in-domain relevant phrases via expansion from label names. By evaluating our approach on several benchmarking datasets from a variety of domains, the results show that our system substantially outperforms the baseline systems especially in datasets whose classes are imbalanced.},
  internal-note = {NLDB 2022 chapter; reviewer fixes: translator->editor (these are the proceedings editors), series->booktitle (required for @incollection), removed double-braced title and stray ~. LNCS series/volume not added -- verify against the DOI record before adding.},
}