diff --git "a/related_34K/test_related_short_2404.16816v1.json" "b/related_34K/test_related_short_2404.16816v1.json" new file mode 100644--- /dev/null +++ "b/related_34K/test_related_short_2404.16816v1.json" @@ -0,0 +1,1421 @@ +[ + { + "url": "http://arxiv.org/abs/2404.16816v1", + "title": "IndicGenBench: A Multilingual Benchmark to Evaluate Generation Capabilities of LLMs on Indic Languages", + "abstract": "As large language models (LLMs) see increasing adoption across the globe, it\nis imperative for LLMs to be representative of the linguistic diversity of the\nworld. India is a linguistically diverse country of 1.4 Billion people. To\nfacilitate research on multilingual LLM evaluation, we release IndicGenBench -\nthe largest benchmark for evaluating LLMs on user-facing generation tasks\nacross a diverse set 29 of Indic languages covering 13 scripts and 4 language\nfamilies. IndicGenBench is composed of diverse generation tasks like\ncross-lingual summarization, machine translation, and cross-lingual question\nanswering. IndicGenBench extends existing benchmarks to many Indic languages\nthrough human curation providing multi-way parallel evaluation data for many\nunder-represented Indic languages for the first time. We evaluate a wide range\nof proprietary and open-source LLMs including GPT-3.5, GPT-4, PaLM-2, mT5,\nGemma, BLOOM and LLaMA on IndicGenBench in a variety of settings. The largest\nPaLM-2 models performs the best on most tasks, however, there is a significant\nperformance gap in all languages compared to English showing that further\nresearch is needed for the development of more inclusive multilingual language\nmodels. IndicGenBench is released at\nwww.github.com/google-research-datasets/indic-gen-bench", + "authors": "Harman Singh, Nitish Gupta, Shikhar Bharadwaj, Dinesh Tewari, Partha Talukdar", + "published": "2024-04-25", + "updated": "2024-04-25", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Original Paper", + "paper_cat": "LLM Fairness", + "gt": "In the last few years, many multilingual LLMs have been developed\u2014starting from mBART (Liu et al., 2020) trained on 25 languages to LLMs that are pre-trained on hundreds of languages, such as mT55 (Xue et al., 2021), PaLM-2 (Anil et al., 2023), GPT-4 (Achiam et al., 2023), Gemini (Google, 2023), and others. These LLMs are typically evaluated on individual multilingual tasks for Translation: WMT (Farhad et al., 2021), FLORES (NLLB-Team et al., 2022); QuestionAnswering: XQuAD (Artetxe et al., 2020), TyDiQA (Clark et al., 2020), XorQA (Asai et al., 2021); Summarization: XLSUM (Hasan et al., 2021a), CrossSum (Bhattacharjee et al., 2023); Reasoning: MGSM (Shi et al., 2022), XCOPA (Ponti et al., 2020) to name a few, or on multilingual benchmarks such as, XTREME (Hu et al., 2020) and XTREME-UP (Ruder et al., 2023). However, most of these evaluation resources contain only a handful of languages or do not contain data for low resource languages, especially Indics. Besides, cross-lingual evaluation data is even more sparse. This work is an effort to bridge these gaps by releasing INDICGENBENCH, a suite of datasets covering diverse cross-lingual and multilingual generation tasks in Indic languages. Most work on creating evaluation data on Indic languages have focused on natural language understanding (NLU) tasks. Kakwani et al. (2020) and Doddapaneni et al. (2023) have released NLU test sets in Indic languages for a wide variety of tasks such as QA and NLI. 
Naamapadam (Mhaske et al., 2023) is a named entity recognition dataset specifically for Indic languages, MASSIVE (FitzGerald et al., 2022) is a slot-filling and intent classification dataset available in 7 Indic languages, IndicGLUE (Kakwani et al., 2020) is an NLU benchmark for 11 Indic languages, whereas GLUECoS (Khanuja et al., 2020) is a Hindi-English code-mixed benchmark containing various NLU tasks. The Belebele Benchmark (Bandarkar et al., 2023) is a multiple-choice machine reading comprehension dataset for 122 languages, of which 17 are Indic. On the other hand, INDICGENBENCH is a natural language generation (NLG) benchmark. Recently, there has been work on creating evaluation benchmarks for natural language generation (NLG) on Indic languages. IndicNLG Suite (Kumar et al., 2022), consisting of 5 NLG tasks in 11 Indic languages, is a leap in this direction. The datasets in this suite are automatically created, either using data from the web (e.g., Wikipedia) or using translation systems. There are a few works that create evaluation data for individual tasks in Indic languages. For example, IndicTrans2 (Gala et al., 2023) creates an n-way parallel dataset for machine translation in 22 scheduled Indian languages, Mukhyansh (Madasu et al., 2023) and PMIndiaSum (Urlana et al., 2023) are headline generation datasets for 8 and 14 Indic languages respectively, and TeSum (Urlana et al., 2022) is an abstractive summarization dataset in the Telugu language. Ramesh et al. (2022) introduced Samanantar, a large translation dataset covering 11 Indic languages. Our work complements IndicNLG Suite and the other datasets in multiple ways. INDICGENBENCH is manually annotated, ensuring high-quality, noise-free text which is not typically found on the web. Our benchmark contains evaluation data for a much larger set of languages spanning low, medium and high resource. Our datasets are multi-language parallel, enabling better comparison among different languages. Lastly, we focus on a complementary and challenging set of tasks, including cross-lingual summarization, cross-lingual and multilingual question answering, and translation.", + "pre_questions": [], + "main_content": "1 Introduction With the advances in generative language technologies powered by Large Language Models (LLMs; Brown et al., 2020; Rae et al., 2021; Chowdhery et al., 2022; OpenAI et al., 2023; Tay et al., 2023; Google, 2023), there has been a surge of interest in evaluating the multilingual capabilities of these models. Recent work (Ahuja et al., 2023a,b) shows a consistent performance gap between high resource languages and languages with lower amounts of web resources available. Figure 1: Performance of state-of-the-art LLMs on different tasks in INDICGENBENCH. We observe a significant performance gap between English and Indic languages across LLMs. To develop highly multilingual generative LLMs which should work equally well for 100s of languages spoken by billions of people in the world, it is crucial to evaluate their capabilities across a variety of languages to uncover performance gaps and guide future research. In this work we focus on India, a country with 1369 rationalized mother tongues spoken by more than a billion people.1 1https://en.wikipedia.org/wiki/Languages_of_India Making progress on language technologies for Indic languages will not only improve the state of affairs in this region, but will also provide valuable lessons to the NLP community which will be applicable to other geographical regions and language families. 
There has been much work from the community in building natural language understanding (NLU) models for Indic languages (Kakwani et al., 2020; Khanuja et al., 2021), as well as evaluation datasets (Doddapaneni et al., 2023; Mhaske et al., 2023) to support such models. In this work, our focus is to develop a high-quality benchmark for evaluating generative language capabilities in a variety of Indic languages across various levels of resourcefulness. Table 1: INDICGENBENCH, our proposed benchmark, consists of five tasks: Cross-lingual Summarization (CROSSSUM-IN), Machine Translation (FLORES-IN), Multilingual QA (XQUAD-IN) and Cross-lingual QA (XORQA-IN-XX and XORQA-IN-EN). An example from each task, the number of languages for which we collect evaluation data (divided by resourcefulness, higher (H), medium (M) and low (L)), and the number of training/validation/test instances per task is shown in the table. See Section 2 for details. Task, #Languages (H / M / L), Dataset Size (Train / Dev / Test): CROSSSUM-IN (Cross-lingual Summarization; example in Hindi), 9 / 7 / 13, 2.9k / 2.9k / 14.5k; FLORES-IN (Machine Translation; example in Konkani), 9 / 7 / 13, \u2013 / 28.9k / 29.3k; XQUAD-IN (Multilingual QA; example in Punjabi), 9 / 3 / \u2013, 1.2k / 1.2k / 14.3k; XORQA-IN-XX (Cross-lingual QA; example in Telugu), 9 / 6 / 13, 2.8k / 14k / 15.1k; XORQA-IN-EN (Cross-lingual QA; example in Santali), 9 / 6 / 13, 2.8k / 14k / 15.1k. We release INDICGENBENCH, a multilingual, multi-way parallel benchmark for measuring language generation capabilities across diverse user-facing tasks in 29 Indic languages across 4 language families (Table 7). INDICGENBENCH extends existing benchmarks such as CrossSum (Bhattacharjee et al., 2023), XQuAD (Artetxe et al., 2020), XorQA (Asai et al., 2021), and FLORES (NLLB-Team et al., 2022) for additional Indic languages and is composed of tasks like cross-lingual summarization (CROSSSUM-IN), machine translation (FLORES-IN), cross-lingual reading comprehension (XORQA-IN-XX and XORQA-IN-EN) and multilingual reading comprehension (XQUAD-IN). Each dataset consists of parallel examples in up to 29 low to comparatively higher resource Indic languages; and for some tasks (e.g. CROSSSUM-IN), INDICGENBENCH provides the first-ever evaluation datasets for as many as 18 of these languages. We also release a small training set in all tasks for efficient adaptation of LLMs. Our comprehensive evaluation of various state-of-the-art proprietary and open-source LLMs on INDICGENBENCH shows that there is a significant gap in performance between English and Indic languages (see Figure 1). Our contributions are as follows: \u2022 Created and released INDICGENBENCH, a high-quality text benchmark for diverse language generation tasks like summarization, question-answering, and translation across 29 Indic languages. INDICGENBENCH is the largest generation benchmark for Indic languages. \u2022 Comprehensive experimentation on SoTA LLMs (mT5, Gemma, BLOOM, LLaMA, GPT-3.5, GPT-4, PaLM-2) across various model sizes and training settings to benchmark their Indic language generation capabilities. \u2022 A qualitative analysis assessing the gaps in current language technologies and defining potential directions for future research. 2 INDICGENBENCH INDICGENBENCH is a high-quality, human-curated benchmark to evaluate text generation capabilities of multilingual models on Indic languages. 
Our benchmark consists of 5 user-facing tasks (viz., summarization, machine translation, and question answering) across 29 Indic languages spanning 13 writing scripts and 4 language families. For certain tasks, INDICGENBENCH provides the first-ever evaluation dataset for up to 18 Indic languages. Table 1 provides a summary of INDICGENBENCH and examples of instances across the tasks present in it. Languages in INDICGENBENCH are divided into (relatively) Higher, Medium, and Low resource categories based on the availability of web text resources (see appendix \u00a7A for details).2 2We note that the languages called relatively higher resource in this paper, e.g., Hindi or Bengali, are in fact mid-low Web resource when compared to English and other truly high resource languages. For example, using Wikipedia as a proxy for language resources, compared to 6.6M+ Wikipedia articles in English, there are only 160K Hindi Wikipedia articles. Higher (9): Bengali, Gujarati, Hindi, Kannada, Malayalam, Marathi, Tamil, Telugu, Urdu. Medium (7): Assamese, Bhojpuri, Nepali, Odia, Punjabi, Pashto, Sanskrit. Low (13): Awadhi, Haryanvi, Tibetan, Garhwali, Konkani, Chhattisgarhi, Rajasthani, Maithili, Manipuri, Malvi, Marwari, Santali, Bodo. As evident from the lists above, our benchmark provides broad coverage over languages with respect to their resourcedness, allowing users to evaluate language models on relatively high-resource languages such as Hindi and extremely low-resource languages such as Manipuri in Meitei script on a single benchmark. To curate the evaluation datasets for our benchmark, we use the following existing datasets as the source: CrossSum (Bhattacharjee et al., 2023) for cross-lingual summarization, FLORES (NLLB-Team et al., 2022) for machine translation, XQuAD (Artetxe et al., 2020) for multilingual QA, and XoRQA (Asai et al., 2021) for cross-lingual QA. From each of these datasets we select a subset of English examples to be a part of our benchmark, and then collect professional human translations for these examples in all target Indic languages. Some target languages are already covered by the source datasets, in which case we re-purpose this existing data and only collect translations for the remaining languages. We also collect and release a small amount of training and validation data, making possible the evaluation of training techniques like fine-tuning, parameter-efficient training, in-context learning, and others. Why extend existing benchmarks? We chose to collect human translations of existing benchmarks as opposed to creating evaluation data from scratch for various reasons: \u2022 Translation-based extension of existing benchmarks results in multi-way parallel data, allowing researchers to attribute performance to task knowledge vs. language understanding, and to measure cross-lingual generalization. \u2022 For many low-resource languages in INDICGENBENCH, a clean text knowledge corpus (e.g., Wikipedia) is not available, making it difficult to acquire source data for annotation. \u2022 By focusing only on translation quality in the target Indic languages, we are able to leverage the quality control that went into designing the source benchmarks. Annotators were professional data labelers working as contractors at our organization and with a vendor. Annotators were paid competitive rates in compliance with applicable labor laws and prevailing market rates. Our pay rate to annotators varied across languages, ranging from USD 2.80 per hour for Pashto to USD 15.90 per hour for Tibetan. 
Cross-Lingual Summarization: CROSSSUM-IN We create CROSSSUM-IN based on CrossSum (Bhattacharjee et al., 2023), a dataset for cross-lingual summarization, which in turn is derived from XL-Sum (Hasan et al., 2021b). CrossSum contains multi-way parallel data in 45 languages, where BBC news articles in a source language are paired with corresponding summaries in other languages. Based on their matching criteria, different languages have different amounts of source-target pairs. We sample 700 English article-summary pairs (100 each from train/dev and 500 from test) and ask human translators to translate the English summary into the target Indic languages. CrossSum already contains data for 9 of our 29 target languages; for these languages we sample 100/100/500 examples from the original dataset to maintain equity with the other languages we collect data for. CROSSSUM-IN contains a total of 20.3k examples across the 29 Indic languages in our benchmark. Machine Translation: FLORES-IN FLORES-200 (NLLB-Team et al., 2022) is a human-annotated multi-way parallel machine translation (MT) benchmark for 200 languages, where the same source English sentences are translated by humans into the 200 target languages. It contains data in 22 of our 29 target languages; we extend this by collecting human translations for the remaining 7 new languages, leading to an MT benchmark in 29 Indic languages which we call FLORES-IN. FLORES-200 is divided into three splits: dev (997), devtest (1012), and test (992), of which the test set is not public. We collect translations for all 997 dev and 1012 devtest sentences, yielding 2009 sentences per language. Collectively, FLORES-IN contains 58.2k examples across 29 Indic languages. Multilingual Question-Answering: XQUAD-IN We create an Indic Multilingual Question Answering task XQUAD-IN based on the multilingual reading comprehension dataset XQuAD (Artetxe et al., 2020). XQuAD is in turn derived from the SQuAD dataset (Rajpurkar et al., 2016), in which an English Wikipedia passage is paired with multiple question-answer (QA) pairs where the answers are short spans from the given passage. The authors of XQuAD collected human translations for 240 passages and 1190 QA pairs from the SQuAD v1.1 development set into 10 higher resource languages (Hindi being the only Indic language). To create XQUAD-IN, we use the 240 passages and 1190 QA pairs from XQuAD as our test set. We additionally selected 20 passages and 100 QA pairs each from the original SQuAD v1.1 training and development sets to create our training and development sets. For all the 280 passages and 1390 QA pairs we collect professional human translations in 12 Indic languages.3 3XQUAD-IN contains all 9 higher-resource languages (see \u00a72) and 3 medium-resource languages, namely, Assamese, Odia, and Punjabi. Overall, XQUAD-IN contains 3.3k passages and 16.6k QA pairs in 12 Indic languages. Cross-Lingual Question-Answering: XORQA-IN We create the Indic Cross-lingual Question-Answering dataset XORQA-IN based on the XOR-TYDI QA dataset (Asai et al., 2021). XOR-TYDI contains questions in non-English languages paired with English evidence passages and short span answers from those passages (similar to SQuAD). It was created with the idea of developing NLP systems that can answer questions in users\u2019 native language by referring to sources in a high-resource language, such as English, which was more likely to contain the answer due to the information scarcity of low-resource languages on the web. 
The original XORTYDI contains data in 7 languages out of which Bengali and Telugu are the two Indic languages. To create XORQA-IN, we select the 302 Bengali and 237 Telugu examples (Bn/Te-question, Enpassage, En-answer) from the XOR-TYDI dev set as our test data.4 Additionally, we sample 600 examples (equally from Bengali and Telugu) from the training set of XOR-TYDI to create our training (100) and development (500) set. We then follow a two-staged translation process, where we first ask the human translators to translate the Bengali or Telugu question (Bn/Te-question) into English (En-question). In the second stage, we collect translations for these English questions (En-question) into target languages (Xx-question) and translations for the English answers (En-answer) into the target languages (Xx-answer). We create two tasks from this translated data: 1. XORQA-IN-EN: Each example contains (Xxquestion, En-passage, En-answer). This task is similar to the XOR-TYDI dataset in additional Indic languages. 2. XORQA-IN-XX: Each example contains (Xxquestion, En-passage, Xx-answer), where the task is to generate the answer in the same language as the question. We collect data for 28 Indic languages resulting in 32k examples.5 3 Experiments and Analysis We use INDICGENBENCH to benchmark multilingual and cross-lingual language generation capabilities of various LLMs on Indic languages. We perform experiments with a variety of open-source LLMs \u2014 mT5 (Xue et al., 2021), LLaMA (Touvron et al., 2023),6, BLOOMZ (Workshop et al., 2022), Gemma (Team et al., 2024); and proprietary LLMs \u2014 GPT-3.5, GPT-4 (OpenAI et al., 2023), and PaLM-2 (Anil et al., 2023). We compare and analyze the performance of these LLMs and their variants in terms of model sizes under different learning paradigms and set4XOR-TYDI has not publicly released its test set. 5We do not collect translations for Nepali. 6LLaMA-2 could not be used due to a restrictive licence Model (LLM) CROSSSUM-IN FLORES-IN XQUAD-IN XORQA-IN-XX XORQA-IN-EN Eval. Metric ChrF ChrF Token-F1 Token-F1 Token-F1 (enxx / xxen) Performance in English GPT-4 30.3 \u2013 / \u2013 64.8 \u2013 37.9 PaLM-2-L 41.1 \u2013 / \u2013 83.7 \u2013 71.4 Average Performance on INDICGENBENCH LLaMA-7B 3.7 11.5 / 21.6 3.8 7.4 10.4 LLaMA-13B 4.1 13.3 / 24.1 4.5 10.4 12.1 LLaMA-65B 4.6 18.1 / 32.7 7.1 16.5 16.3 BLOOM-7B 3.8 18.3 / 31.2 13.8 7.9 23.6 BLOOMZ-7B 1.2 40.8 / 48.4 53.7 7.0 49.0 Gemma-7B-PT 0.0 32.1 / 50.4 0.5 11.7 23.8 Gemma-7B-IT 11.6 18.6 / 29.2 35.3 13.5 24.8 GPT-3.5 16.3 29.2 / 47.7 33.2 21.6 35.5 GPT-4 17.6 32.1 / 54.5 55.7 23.4 46.0 PaLM-2-XXS 7.2 24.0 / 43.4 34.6 13.5 36.8 PaLM-2-XS 15.5 40.7 / 58.3 62.2 29.5 47.8 PaLM-2-S 18.5 43.5 / 61.6 66.7 31.6 57.4 PaLM-2-L 21.2 47.5 / 65.1 69.3 37.4 55.9 Table 2: One-shot performance on INDICGENBENCH across model sizes for all LLMs considered in our work (\u00a73.1). For each LLM family performance improves with increasing model size, with PaLM-2-L performing the best across most tasks. Compared to English, all models under-perform significantly highlighting shortcomings of current SoTA LLMs. See Section 3.1 for details. tings. We first evaluate model performance on one-shot prompting (\u00a73.1) and also measure performance across language categories based on resourcedness (\u00a73.2). We then evaluate the effect of number of in-context examples shown to the model as supervised data (\u00a73.3) and the effect of prompting in a higher-resource language such an English or Hindi (\u00a73.4). 
Using the training data contained in INDICGENBENCH, we measure how the performance of LLMs after fine-tuning compares with few-shot prompting (\u00a73.5). Finally, we perform qualitative analysis of models on INDICGENBENCH and highlight some areas of improvement for future model development (\u00a73.7). Evaluation Metrics For the cross-lingual summarization and translation tasks, CROSSSUM-IN and FLORES-IN, we report Character-F1 (ChrF) metric (Popovi\u00b4 c, 2015) since token-level metrics like ROUGE and BLEU are not reliable for lowresource languages (Bapna et al., 2022). To stay consistent with existing literature on QA tasks, we report SQuAD-style Token-F1 on our XQUAD-IN and XORQA-IN QA tasks. On FLORES-IN, we report translation performance in both directions\u2014translating from English to the target language (enxx) and vice-versa (xxen). 3.1 Comparison of LLMs on INDICGENBENCH In Table 2 we evaluate LLaMA, BLOOMZ, Gemma, GPT and PaLM-2 family of models on all tasks of INDICGENBENCH in a one-shot prompted setting. Numbers are averaged across all languages in the evaluation data. To compare, we also report English performance for GPT-4 and PaLM-2-L. We see across tasks that larger models from the same LLM family perform better. PaLM2-L performs the best among all LLMs considered, except for the XORQA-IN-EN task where PaLM-2-S performs slightly better. We find that open source LLaMA models perform much worse compared to proprietary models; even the largest LLaMA-65B model significantly underperforms the smallest PaLM-2-XXS model. Gemma7B instruction tuned model performs better than LLaMA-13B as well as LLaMA-65B on most tasks. BLOOMZ, which is an instruction tuned version of BLOOM (Workshop et al., 2022), pre-trained on large-scale multilingual data, works the best on three out of five tasks in INDICGENBENCH. On CROSSSUM-IN and XORQA-IN-XX it falls behind LLaMA and Gemma. Compared to English, we see significant room for improvement (20+ ChrF or Token-F1 points) across all tasks. 3.2 Performance across language categories In Table 3 we report one-shot performance across language categories defined in Section 2. We only show performance for Gemma-7B-IT, BLOOMZ7B, LLaMA-65B, GPT-4 and PaLM-2-L models here and report performance for the other models in appendix B.1. We find that there is a significant performance drop going from higher resourced languages to medium resourced ones, and further drop in lower resourced languages. We would like to point out two observations here: (a) In FLORES-IN, the performance for translating English to the target language (enxx) drops significantly from higher to lower resourced languages (56.9 \u219241.9 for PaLM-2-L) whereas the performance in the xxen direction does not fall this drastically (68.2 \u219262.6). A similar trend is seen when comparing XORQA-IN-XX and XORQA-IN-EN. 
This highlights that current LLMs are better at unCROSSSUM-IN FLORES-IN (enxx / xxen) XQUAD-IN XORQA-IN-XX XORQA-IN-EN Model High Medium Low High Medium Low High Medium High Medium Low High Medium Low LLaMA-65B 4.4 4.6 4.7 18.2 / 31.5 15.4 / 30.0 19.5 / 35.0 8.8 1.9 17.7 13.5 17.1 16.4 14.0 17.3 Gemma-7B-IT 13.9 11.5 10.0 17.6 / 33.7 15.0 / 26.1 21.3 / 27.7 38.8 24.8 18.9 8.3 12.2 29.5 23.9 21.9 BLOOMZ-7B 1.5 1.7 0.6 67.7 / 59.1 39.4 / 50.2 22.9 / 40.0 55.5 48.1 10.8 2.8 6.2 64.7 45.8 39.5 GPT-4 19.4 17.9 16.3 36.2 / 59.6 30.7 / 55.2 29.9 / 50.5 56.1 54.6 25.8 21.6 22.6 49.4 50.0 41.8 PaLM-2-L 25.2 23.1 17.5 56.9 / 68.2 45.9 / 65.6 41.9 / 62.6 72.5 59.8 41.9 36.7 34.6 57.3 57.9 53.9 Table 3: One-shot performance across language categories based on resourcedness defined in Section 2. For all tasks, we witness significantly lower performances in medium and low resource languages compared to the higher resource ones. Please see Table 9 in appendix B.1 for results on other models. See Section 3.2 for more details. derstanding these lower-resourced languages than generating fluent text in them. (b) In few cases, we see smaller performance deltas between medium and lower resourced languages compared to higher and medium categories. From our analysis, this can mainly be attributed to many languages in the lower category being similar to Hindi and written in the same Devanagari script. FLORES-IN XORQA-IN-XX Model (LLM) 0 1 5 0 1 2 3 LLaMA-7B 8.0 11.5 11.4 5.0 7.4 9.0 9.2 LLaMA-13B 8.6 13.3 13.4 6.3 10.4 12.2 13.1 LLaMA-65B 14.0 18.1 18.3 12.3 16.5 18.7 19.4 PaLM-2-XXS 0.8 24.0 26.9 8.9 13.5 15.8 17.5 PaLM-2-XS 20.1 40.7 42.3 21.4 29.5 32.2 33.2 PaLM-2-S 24.9 43.5 45.2 22.7 31.6 33.4 35.4 PaLM-2-L 31.1 47.5 49.3 31.9 37.4 39.7 41.1 Table 4: Performance by varying number of incontext exemplars for LLaMA and PaLM-2 models on FLORES-IN (enxx) and XORQA-IN-XX tasks (\u00a73.3). Performance improves with increasing amounts of supervision provided in-context. Refer appendix B.2 for results on other tasks and models. 3.3 In-context learning on INDICGENBENCH In this section we aim to understand the impact of the number of in-context examples shown to the LLM during few-shot prompting. Since CROSSSUM-IN and XQUAD-IN input passages are long, we are only able to perform 0-and-1-shot prompting. For XORQA-IN-XX and XORQA-IN-EN we perform 0-to-3-shot prompting, and for FLORES-IN we perform 0, 1 and 5-shot prompting. We show performance for FLORES-IN and XORQA-IN-XX in Table 4. Other results are shown in appendix B.5 due to space limitations. Across model families and sizes we observe that increasing the amount of supervision in terms of the in-context examples improves performance. 3.4 Transfer from high-resource languages For languages with no supervised data, one option to improve performance is utilizing existing supervised data another language as in-context exemplars. In this section we aim to study if the language in which the model is prompted plays a role in performance. In Table 5 we show performance when the model is prompted in English vs. Hindi, a representative higher resourced Indic language. For comparison, we also show performance when the in-context exemplar is in the same language as the test instance. We find that Hindi in-context exemplars are much more useful for all models as compared to their English counterparts. Surprisingly, for smaller models, performance with Hindi exemplars comes extremely close to prompting in the test language, even better sometimes. 
3.5 Fine-tuning LLMs on INDICGENBENCH and Comparison with In-Context Learning As outlined in Section 2, we also release a small, high-quality training set for all tasks in INDICGENBENCH (except FLORES-IN which only has dev and test sets). This training data can be used to adapt LLMs to downstream tasks in Indic languages via fine-tuning and other training techniques. Table 6 shows our results of fine-tuning mT5 and PaLM-2 models and their comparison with incontext learning using PaLM-2. We fine-tune each model on training data from all available languages including English, use the development set for early stopping, and report numbers on the test set. For question-answering tasks that require generating short spans as answers, we find that older generation mT5 models significantly outperform smaller CROSSSUM-IN XQUAD-IN XORQA-IN-XX XORQA-IN-EN Model (1-Shot Lang) Higher Medium Low Higher Medium Higher Medium Low Higher Medium Low PaLM-2-XXS (En) 0.3 0.1 0.3 38.5 31.9 14.0 5.4 7.3 40.3 35.0 30.8 PaLM-2-XXS (Hi) 1.3 2.1 3.7 39.8 33.3 17.6 8.5 10.5 45.5 39.4 31.9 PaLM-2-XXS (Lang) 7.7 7.6 6.7 37.2 26.8 17.7 8.8 12.8 43.6 38.3 31.5 PaLM-2-XS (En) 0.3 0.2 0.5 64.3 62.2 30.6 23.9 20.8 35.9 32.1 27.2 PaLM-2-XS (Hi) 3.5 5.5 9.9 65.4 63.5 33.2 25.8 22.7 49.3 46.8 40.7 PaLM-2-XS (Lang) 18.4 16.4 13.0 65.1 53.3 35.8 27.6 26.1 53.3 51.5 42.2 PaLM-2-S (En) 0.4 0.2 0.5 67.4 66.8 27.5 19.9 19.9 48.6 47.1 40.8 PaLM-2-S (Hi) 4.4 6.9 13.2 68.5 67.5 34.2 27.0 24.9 58.3 57.0 49.0 PaLM-2-S (Lang) 22.4 19.8 15.1 69.9 57.3 36.6 30.3 28.6 60.1 61.4 53.6 PaLM-2-L (En) 0.4 0.2 0.6 71.7 69.8 37.7 33.2 29.7 28.7 27.5 26.2 PaLM-2-L (Hi) 4.7 7.0 13.8 72.6 71.0 39.7 34.6 31.2 45.5 44.8 41.5 PaLM-2-L (Lang) 25.2 23.1 17.5 72.5 59.8 41.9 36.7 34.6 57.3 57.9 53.9 Table 5: Effect of in-context exemplar language (\u00a73.4): Performance comparison when the one-shot exemplar is provided in English (En) or Hindi (Hi) as opposed to the language of the test instance (Lang). In-context prompting in the test language (Lang) provides the best performance, followed by Hindi (Hi) and then English (En). This follows the same order as relatedness between test and prompting language, highlighting the benefit of prompting in a language more related to the test language (e.g., Hindi compared to English in this case). CROSSSUM-IN XQUAD-IN XORQA-IN-XX XORQA-IN-EN Model Higher Medium Low Higher Medium Higher Medium Low Higher Medium Low mT5 models \u2013 Fine-Tuned mT5-B 19.5 18.9 15.1 46.2 30.9 3.8 4.0 5.5 31.7 31.4 30.8 mT5-L 20.5 19.9 15.5 54.3 38.6 11.8 11.0 10.4 56.8 53.7 45.4 mT5-XL 22.7 21.1 15.3 57.4 40.5 20.7 13.5 15.6 58.2 56.2 46.5 mT5-XXL 25.9 24.2 10.4 62.0 44.4 28.8 23.6 21.9 70.3 68.9 59.1 PaLM-2 models Fine-Tuned PaLM-2-XXS 22.5 19.7 16.5 41.2 18.1 18.1 10.9 12.9 60.2 56.9 50.9 PaLM-2-XS 28.5 25.6 18.8 40.2 16.9 30.4 23.6 19.6 69.1 66.6 56.6 PaLM-2 models Few-shot prompted PaLM-2-XXSF S 7.7 7.6 6.7 37.2 26.8 22.7 12.3 16.4 51.6 47.1 38.4 PaLM-2-XSF S 18.4 16.4 13.0 65.1 53.3 39.2 32.0 29.5 67.0 65.3 56.5 Table 6: (Top) Fine-tuning performance of mT5 and PaLM-2 models (\u00a73.5). Bold represents best numbers among fine-tuned models. PaLM-2 outperforms mT5 for longer-form generation task (CROSSSUM-IN), whereas mT5 models do well on short answer-span QA tasks. (Bottom) Comparison of in-context learning vs. fine-tuning on PaLM-2 models. In Green , we highlight the best PaLM-2 number (among fine-tuned and few-shot). For CROSSSUM-IN task requiring longer-form generation, fine-tuning outperforms few-shot prompting. 
PaLM-2 models in most cases.7 7Since the parameter count for PaLM-2 models is not public, we cannot attribute this performance difference to model sizes. On CROSSSUM-IN, which requires generating a longer summary, we find that PaLM-2 models are more effective. For Question-Answering tasks, as the model size increases from PaLM-2-XXS to PaLM-2-XS, we see that in-context learning yields equal or better performance compared to fine-tuning the model. For example, in XORQA-IN-XX, as the model size increases from XXS to XS, we see that the gap between few-shot prompting and fine-tuning significantly increases from 2-4% (in XXS) to 9-10% (in XS). In the case of XQUAD-IN, we see that for the larger PaLM-2-XS model, it is much better to perform in-context learning as compared to fine-tuning, for both medium and high resource Indic languages. For XORQA-IN-EN, in-context learning reaches the fine-tuning performance as model size increases to PaLM-2-XS. For CROSSSUM-IN, the gap between fine-tuning and in-context learning shrinks as model size increases, which suggests that for even larger model sizes, it might be better to learn in-context. 3.6 Analyzing the Tokenizer across Indic Languages Figure 2: Tokenizer fertility for different languages using OpenAI\u2019s Byte Pair Encoding. We note that mid-low resource languages suffer from high token fertility. (Section 3.6) Figure 3: Percentage of in-context XQUAD-IN exemplars that fit in a 1920 token context window. Mid-low resource languages\u2019 high token fertility (Figure 2) makes it impossible to perform few-shot prompting in these languages. (Section 3.6) In Figure 2, we compare the token fertility (the average number of sub-words that a word is broken down into by the tokenizer) across all Indic languages in INDICGENBENCH.8 8We use OpenAI\u2019s BPE tokenizer (platform.openai.com/tokenizer). The PaLM-2 tokenizer is not publicly available. We find that the token fertility varies significantly across languages, from 4.1 for Pashto to 19.9 for Tibetan. A high token fertility is undesirable and can disproportionately affect a particular language\u2019s performance. For languages where text is broken into a larger number of tokens, fewer in-context examples can be input to the LLM during inference. This can negatively impact performance (see Table 4). In Figure 3, we show how the percentage of data that fits in a particular context length changes with the number of in-context examples for various languages. For example, we see in Figure 3 that for medium resource languages with high token fertility like Oriya and Punjabi we can incorporate far fewer in-context examples, compared to Indic languages with lower token fertility like Hindi and Marathi. 3.7 Qualitative Analysis We manually analyze predictions from the best performing model, PaLM-2-L, with the aim of understanding the shortcomings of current LLMs and highlighting areas of improvement for future research. We randomly select 20 examples each in the CROSSSUM-IN and FLORES-IN tasks for the following languages, which are reviewed by native speakers: Awadhi, Haryanvi, Chhattisgarhi, Konkani, and Assamese. We found the following patterns of errors: Generation in a related language The languages Awadhi, Haryanvi, and Chhattisgarhi are related to the higher resource language Hindi and written in the same script, Devanagari. We find that the model generates mixed-language output with words mixed in from Hindi and also outputs incorrectly inflected forms of the main verbs in the output. 
We show a couple of examples of this phenomenon in Figure 5a in the appendix. Hallucination and Missing Information In the cross-lingual summarization task CROSSSUM-IN, we find that the model often outputs extra information that is not present in the source article. In translation, we have observed examples where some crucial information from the source sentence is missing from the generated output. Also, in some cases, the model fails to understand polysemous English words and generates a translation for the incorrect sense. We show examples of these phenomena in Figures 4a, 4b, and 5b in the appendix. 5 Conclusion We release INDICGENBENCH, the largest benchmark for evaluating LLMs on 5 user-facing generation tasks across 29 Indic languages, providing evaluation data for many under-represented Indic languages for the first time. INDICGENBENCH has broad coverage along many dimensions \u2013 it covers 13 writing scripts, 4 language families, and spans languages across the available web resource spectrum. We carry out an extensive comparison of current SoTA LLMs on INDICGENBENCH and highlight areas for future improvement. We are hopeful INDICGENBENCH will play an important role in the further development of LLMs for Indic languages, ultimately benefiting a billion-plus population. 6 Limitations Since INDICGENBENCH extends existing benchmarks to new Indic languages through human translation, it may miss some India-specific entities and linguistic nuances. Future work can explore trans-localization for creating improved evaluation and fine-tuning. INDICGENBENCH doesn\u2019t cover long-form generation and reasoning tasks. Creating such datasets is part of our future work. 7 Acknowledgments We thank Aditi Chaudhury, Ashok Popat, Shachi Dave, Sagar Gubbi, Megh Umekar and members of the Languages team at Google Research India (GRI) for providing feedback on this work. The authors would like to thank Manish Gupta and Divy Thakkar for their support and guidance." + }, + { + "url": "http://arxiv.org/abs/2003.11080v5", + "title": "XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization", + "abstract": "Much recent progress in applications of machine learning models to NLP has\nbeen driven by benchmarks that evaluate models across a wide variety of tasks.\nHowever, these broad-coverage benchmarks have been mostly limited to English,\nand despite an increasing interest in multilingual models, a benchmark that\nenables the comprehensive evaluation of such methods on a diverse range of\nlanguages and tasks is still missing. To this end, we introduce the\nCross-lingual TRansfer Evaluation of Multilingual Encoders XTREME benchmark, a\nmulti-task benchmark for evaluating the cross-lingual generalization\ncapabilities of multilingual representations across 40 languages and 9 tasks.\nWe demonstrate that while models tested on English reach human performance on\nmany tasks, there is still a sizable gap in the performance of cross-lingually\ntransferred models, particularly on syntactic and sentence retrieval tasks.\nThere is also a wide spread of results across languages. 
We release the\nbenchmark to encourage research on cross-lingual learning methods that transfer\nlinguistic knowledge across a diverse and representative set of languages and\ntasks.", + "authors": "Junjie Hu, Sebastian Ruder, Aditya Siddhant, Graham Neubig, Orhan Firat, Melvin Johnson", + "published": "2020-03-24", + "updated": "2020-09-04", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2305.10403v3", + "title": "PaLM 2 Technical Report", + "abstract": "We introduce PaLM 2, a new state-of-the-art language model that has better\nmultilingual and reasoning capabilities and is more compute-efficient than its\npredecessor PaLM. PaLM 2 is a Transformer-based model trained using a mixture\nof objectives. Through extensive evaluations on English and multilingual\nlanguage, and reasoning tasks, we demonstrate that PaLM 2 has significantly\nimproved quality on downstream tasks across different model sizes, while\nsimultaneously exhibiting faster and more efficient inference compared to PaLM.\nThis improved efficiency enables broader deployment while also allowing the\nmodel to respond faster, for a more natural pace of interaction. PaLM 2\ndemonstrates robust reasoning capabilities exemplified by large improvements\nover PaLM on BIG-Bench and other reasoning tasks. PaLM 2 exhibits stable\nperformance on a suite of responsible AI evaluations, and enables\ninference-time control over toxicity without additional overhead or impact on\nother capabilities. Overall, PaLM 2 achieves state-of-the-art performance\nacross a diverse set of tasks and capabilities.\n When discussing the PaLM 2 family, it is important to distinguish between\npre-trained models (of various sizes), fine-tuned variants of these models, and\nthe user-facing products that use these models. In particular, user-facing\nproducts typically include additional pre- and post-processing steps.\nAdditionally, the underlying models may evolve over time. Therefore, one should\nnot expect the performance of user-facing products to exactly match the results\nreported in this report.", + "authors": "Rohan Anil, Andrew M. Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, Eric Chu, Jonathan H. Clark, Laurent El Shafey, Yanping Huang, Kathy Meier-Hellstern, Gaurav Mishra, Erica Moreira, Mark Omernick, Kevin Robinson, Sebastian Ruder, Yi Tay, Kefan Xiao, Yuanzhong Xu, Yujing Zhang, Gustavo Hernandez Abrego, Junwhan Ahn, Jacob Austin, Paul Barham, Jan Botha, James Bradbury, Siddhartha Brahma, Kevin Brooks, Michele Catasta, Yong Cheng, Colin Cherry, Christopher A. 
Choquette-Choo, Aakanksha Chowdhery, Cl\u00e9ment Crepy, Shachi Dave, Mostafa Dehghani, Sunipa Dev, Jacob Devlin, Mark D\u00edaz, Nan Du, Ethan Dyer, Vlad Feinberg, Fangxiaoyu Feng, Vlad Fienber, Markus Freitag, Xavier Garcia, Sebastian Gehrmann, Lucas Gonzalez, Guy Gur-Ari, Steven Hand, Hadi Hashemi, Le Hou, Joshua Howland, Andrea Hu, Jeffrey Hui, Jeremy Hurwitz, Michael Isard, Abe Ittycheriah, Matthew Jagielski, Wenhao Jia, Kathleen Kenealy, Maxim Krikun, Sneha Kudugunta, Chang Lan, Katherine Lee, Benjamin Lee, Eric Li, Music Li, Wei Li, YaGuang Li, Jian Li, Hyeontaek Lim, Hanzhao Lin, Zhongtao Liu, Frederick Liu, Marcello Maggioni, Aroma Mahendru, Joshua Maynez, Vedant Misra, Maysam Moussalem, Zachary Nado, John Nham, Eric Ni, Andrew Nystrom, Alicia Parrish, Marie Pellat, Martin Polacek, Alex Polozov, Reiner Pope, Siyuan Qiao, Emily Reif, Bryan Richter, Parker Riley, Alex Castro Ros, Aurko Roy, Brennan Saeta, Rajkumar Samuel, Renee Shelby, Ambrose Slone, Daniel Smilkov, David R. So, Daniel Sohn, Simon Tokumine, Dasha Valter, Vijay Vasudevan, Kiran Vodrahalli, Xuezhi Wang, Pidong Wang, Zirui Wang, Tao Wang, John Wieting, Yuhuai Wu, Kelvin Xu, Yunhan Xu, Linting Xue, Pengcheng Yin, Jiahui Yu, Qiao Zhang, Steven Zheng, Ce Zheng, Weikang Zhou, Denny Zhou, Slav Petrov, Yonghui Wu", + "published": "2023-05-17", + "updated": "2023-09-13", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2203.05437v2", + "title": "IndicNLG Benchmark: Multilingual Datasets for Diverse NLG Tasks in Indic Languages", + "abstract": "Natural Language Generation (NLG) for non-English languages is hampered by\nthe scarcity of datasets in these languages. In this paper, we present the\nIndicNLG Benchmark, a collection of datasets for benchmarking NLG for 11 Indic\nlanguages. We focus on five diverse tasks, namely, biography generation using\nWikipedia infoboxes, news headline generation, sentence summarization,\nparaphrase generation and, question generation. We describe the created\ndatasets and use them to benchmark the performance of several monolingual and\nmultilingual baselines that leverage pre-trained sequence-to-sequence models.\nOur results exhibit the strong performance of multilingual language-specific\npre-trained models, and the utility of models trained on our dataset for other\nrelated NLG tasks. Our dataset creation methods can be easily applied to\nmodest-resource languages as they involve simple steps such as scraping news\narticles and Wikipedia infoboxes, light cleaning, and pivoting through machine\ntranslation data. To the best of our knowledge, the IndicNLG Benchmark is the\nfirst NLG benchmark for Indic languages and the most diverse multilingual NLG\ndataset, with approximately 8M examples across 5 tasks and 11 languages. The\ndatasets and models are publicly available at\nhttps://ai4bharat.iitm.ac.in/indicnlg-suite.", + "authors": "Aman Kumar, Himani Shrotriya, Prachi Sahu, Raj Dabre, Ratish Puduppully, Anoop Kunchukuttan, Amogh Mishra, Mitesh M. Khapra, Pratyush Kumar", + "published": "2022-03-10", + "updated": "2022-10-27", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1910.11856v3", + "title": "On the Cross-lingual Transferability of Monolingual Representations", + "abstract": "State-of-the-art unsupervised multilingual models (e.g., multilingual BERT)\nhave been shown to generalize in a zero-shot cross-lingual setting. 
This\ngeneralization ability has been attributed to the use of a shared subword\nvocabulary and joint training across multiple languages giving rise to deep\nmultilingual abstractions. We evaluate this hypothesis by designing an\nalternative approach that transfers a monolingual model to new languages at the\nlexical level. More concretely, we first train a transformer-based masked\nlanguage model on one language, and transfer it to a new language by learning a\nnew embedding matrix with the same masked language modeling objective, freezing\nparameters of all other layers. This approach does not rely on a shared\nvocabulary or joint training. However, we show that it is competitive with\nmultilingual BERT on standard cross-lingual classification benchmarks and on a\nnew Cross-lingual Question Answering Dataset (XQuAD). Our results contradict\ncommon beliefs of the basis of the generalization ability of multilingual\nmodels and suggest that deep monolingual models learn some abstractions that\ngeneralize across languages. We also release XQuAD as a more comprehensive\ncross-lingual benchmark, which comprises 240 paragraphs and 1190\nquestion-answer pairs from SQuAD v1.1 translated into ten languages by\nprofessional translators.", + "authors": "Mikel Artetxe, Sebastian Ruder, Dani Yogatama", + "published": "2019-10-25", + "updated": "2020-05-26", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2311.17743v1", + "title": "Mukhyansh: A Headline Generation Dataset for Indic Languages", + "abstract": "The task of headline generation within the realm of Natural Language\nProcessing (NLP) holds immense significance, as it strives to distill the true\nessence of textual content into concise and attention-grabbing summaries. While\nnoteworthy progress has been made in headline generation for widely spoken\nlanguages like English, there persist numerous challenges when it comes to\ngenerating headlines in low-resource languages, such as the rich and diverse\nIndian languages. A prominent obstacle that specifically hinders headline\ngeneration in Indian languages is the scarcity of high-quality annotated data.\nTo address this crucial gap, we proudly present Mukhyansh, an extensive\nmultilingual dataset, tailored for Indian language headline generation.\nComprising an impressive collection of over 3.39 million article-headline\npairs, Mukhyansh spans across eight prominent Indian languages, namely Telugu,\nTamil, Kannada, Malayalam, Hindi, Bengali, Marathi, and Gujarati. We present a\ncomprehensive evaluation of several state-of-the-art baseline models.\nAdditionally, through an empirical analysis of existing works, we demonstrate\nthat Mukhyansh outperforms all other models, achieving an impressive average\nROUGE-L score of 31.43 across all 8 languages.", + "authors": "Lokesh Madasu, Gopichand Kanumolu, Nirmal Surange, Manish Shrivastava", + "published": "2023-11-29", + "updated": "2023-11-29", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2308.16884v1", + "title": "The Belebele Benchmark: a Parallel Reading Comprehension Dataset in 122 Language Variants", + "abstract": "We present Belebele, a multiple-choice machine reading comprehension (MRC)\ndataset spanning 122 language variants. 
Significantly expanding the language\ncoverage of natural language understanding (NLU) benchmarks, this dataset\nenables the evaluation of text models in high-, medium-, and low-resource\nlanguages. Each question is based on a short passage from the Flores-200\ndataset and has four multiple-choice answers. The questions were carefully\ncurated to discriminate between models with different levels of general\nlanguage comprehension. The English dataset on its own proves difficult enough\nto challenge state-of-the-art language models. Being fully parallel, this\ndataset enables direct comparison of model performance across all languages. We\nuse this dataset to evaluate the capabilities of multilingual masked language\nmodels (MLMs) and large language models (LLMs). We present extensive results\nand find that despite significant cross-lingual transfer in English-centric\nLLMs, much smaller MLMs pretrained on balanced multilingual data still\nunderstand far more languages. We also observe that larger vocabulary size and\nconscious vocabulary construction correlate with better performance on\nlow-resource languages. Overall, Belebele opens up new avenues for evaluating\nand analyzing the multilingual capabilities of NLP systems.", + "authors": "Lucas Bandarkar, Davis Liang, Benjamin Muller, Mikel Artetxe, Satya Narayan Shukla, Donald Husa, Naman Goyal, Abhinandan Krishnan, Luke Zettlemoyer, Madian Khabsa", + "published": "2023-08-31", + "updated": "2023-08-31", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG", + "I.2.7" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2001.08210v2", + "title": "Multilingual Denoising Pre-training for Neural Machine Translation", + "abstract": "This paper demonstrates that multilingual denoising pre-training produces\nsignificant performance gains across a wide variety of machine translation (MT)\ntasks. We present mBART -- a sequence-to-sequence denoising auto-encoder\npre-trained on large-scale monolingual corpora in many languages using the BART\nobjective. mBART is one of the first methods for pre-training a complete\nsequence-to-sequence model by denoising full texts in multiple languages, while\nprevious approaches have focused only on the encoder, decoder, or\nreconstructing parts of the text. Pre-training a complete model allows it to be\ndirectly fine tuned for supervised (both sentence-level and document-level) and\nunsupervised machine translation, with no task-specific modifications. We\ndemonstrate that adding mBART initialization produces performance gains in all\nbut the highest-resource settings, including up to 12 BLEU points for low\nresource MT and over 5 BLEU points for many document-level and unsupervised\nmodels. We also show it also enables new types of transfer to language pairs\nwith no bi-text or that were not in the pre-training corpus, and present\nextensive analysis of which factors contribute the most to effective\npre-training.", + "authors": "Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer", + "published": "2020-01-22", + "updated": "2020-01-23", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2305.08828v2", + "title": "PMIndiaSum: Multilingual and Cross-lingual Headline Summarization for Languages in India", + "abstract": "This paper introduces PMIndiaSum, a multilingual and massively parallel\nsummarization corpus focused on languages in India. 
Our corpus provides a\ntraining and testing ground for four language families, 14 languages, and the\nlargest to date with 196 language pairs. We detail our construction workflow\nincluding data acquisition, processing, and quality assurance. Furthermore, we\npublish benchmarks for monolingual, cross-lingual, and multilingual\nsummarization by fine-tuning, prompting, as well as translate-and-summarize.\nExperimental results confirm the crucial role of our data in aiding\nsummarization between Indian languages. Our dataset is publicly available and\ncan be freely modified and re-distributed.", + "authors": "Ashok Urlana, Pinzhen Chen, Zheng Zhao, Shay B. Cohen, Manish Shrivastava, Barry Haddow", + "published": "2023-05-15", + "updated": "2023-10-20", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "I.2.7" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2112.08804v3", + "title": "CrossSum: Beyond English-Centric Cross-Lingual Summarization for 1,500+ Language Pairs", + "abstract": "We present CrossSum, a large-scale cross-lingual summarization dataset\ncomprising 1.68 million article-summary samples in 1,500+ language pairs. We\ncreate CrossSum by aligning parallel articles written in different languages\nvia cross-lingual retrieval from a multilingual abstractive summarization\ndataset and perform a controlled human evaluation to validate its quality. We\npropose a multistage data sampling algorithm to effectively train a\ncross-lingual summarization model capable of summarizing an article in any\ntarget language. We also introduce LaSE, an embedding-based metric for\nautomatically evaluating model-generated summaries. LaSE is strongly correlated\nwith ROUGE and, unlike ROUGE, can be reliably measured even in the absence of\nreferences in the target language. Performance on ROUGE and LaSE indicate that\nour proposed model consistently outperforms baseline models. To the best of our\nknowledge, CrossSum is the largest cross-lingual summarization dataset and the\nfirst ever that is not centered around English. We are releasing the dataset,\ntraining and evaluation scripts, and models to spur future research on\ncross-lingual summarization. The resources can be found at\nhttps://github.com/csebuetnlp/CrossSum", + "authors": "Abhik Bhattacharjee, Tahmid Hasan, Wasi Uddin Ahmad, Yuan-Fang Li, Yong-Bin Kang, Rifat Shahriyar", + "published": "2021-12-16", + "updated": "2023-05-25", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2010.11934v3", + "title": "mT5: A massively multilingual pre-trained text-to-text transformer", + "abstract": "The recent \"Text-to-Text Transfer Transformer\" (T5) leveraged a unified\ntext-to-text format and scale to attain state-of-the-art results on a wide\nvariety of English-language NLP tasks. In this paper, we introduce mT5, a\nmultilingual variant of T5 that was pre-trained on a new Common Crawl-based\ndataset covering 101 languages. We detail the design and modified training of\nmT5 and demonstrate its state-of-the-art performance on many multilingual\nbenchmarks. We also describe a simple technique to prevent \"accidental\ntranslation\" in the zero-shot setting, where a generative model chooses to\n(partially) translate its prediction into the wrong language. 
All of the code\nand model checkpoints used in this work are publicly available.", + "authors": "Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel", + "published": "2020-10-22", + "updated": "2021-03-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2305.16307v3", + "title": "IndicTrans2: Towards High-Quality and Accessible Machine Translation Models for all 22 Scheduled Indian Languages", + "abstract": "India has a rich linguistic landscape with languages from 4 major language\nfamilies spoken by over a billion people. 22 of these languages are listed in\nthe Constitution of India (referred to as scheduled languages) are the focus of\nthis work. Given the linguistic diversity, high-quality and accessible Machine\nTranslation (MT) systems are essential in a country like India. Prior to this\nwork, there was (i) no parallel training data spanning all 22 languages, (ii)\nno robust benchmarks covering all these languages and containing content\nrelevant to India, and (iii) no existing translation models which support all\nthe 22 scheduled languages of India. In this work, we aim to address this gap\nby focusing on the missing pieces required for enabling wide, easy, and open\naccess to good machine translation systems for all 22 scheduled Indian\nlanguages. We identify four key areas of improvement: curating and creating\nlarger training datasets, creating diverse and high-quality benchmarks,\ntraining multilingual models, and releasing models with open access. Our first\ncontribution is the release of the Bharat Parallel Corpus Collection (BPCC),\nthe largest publicly available parallel corpora for Indic languages. BPCC\ncontains a total of 230M bitext pairs, of which a total of 126M were newly\nadded, including 644K manually translated sentence pairs created as part of\nthis work. Our second contribution is the release of the first n-way parallel\nbenchmark covering all 22 Indian languages, featuring diverse domains,\nIndian-origin content, and source-original test sets. Next, we present\nIndicTrans2, the first model to support all 22 languages, surpassing existing\nmodels on multiple existing and new benchmarks created as a part of this work.\nLastly, to promote accessibility and collaboration, we release our models and\nassociated data with permissive licenses at\nhttps://github.com/AI4Bharat/IndicTrans2.", + "authors": "Jay Gala, Pranjal A. Chitale, Raghavan AK, Varun Gumma, Sumanth Doddapaneni, Aswanth Kumar, Janki Nawale, Anupama Sujatha, Ratish Puduppully, Vivek Raghavan, Pratyush Kumar, Mitesh M. Khapra, Raj Dabre, Anoop Kunchukuttan", + "published": "2023-05-25", + "updated": "2023-12-20", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2212.10168v2", + "title": "Naamapadam: A Large-Scale Named Entity Annotated Data for Indic Languages", + "abstract": "We present, Naamapadam, the largest publicly available Named Entity\nRecognition (NER) dataset for the 11 major Indian languages from two language\nfamilies. The dataset contains more than 400k sentences annotated with a total\nof at least 100k entities from three standard entity categories (Person,\nLocation, and, Organization) for 9 out of the 11 languages. 
The training\ndataset has been automatically created from the Samanantar parallel corpus by\nprojecting automatically tagged entities from an English sentence to the\ncorresponding Indian language translation. We also create manually annotated\ntestsets for 9 languages. We demonstrate the utility of the obtained dataset on\nthe Naamapadam-test dataset. We also release IndicNER, a multilingual IndicBERT\nmodel fine-tuned on Naamapadam training set. IndicNER achieves an F1 score of\nmore than $80$ for $7$ out of $9$ test languages. The dataset and models are\navailable under open-source licences at\nhttps://ai4bharat.iitm.ac.in/naamapadam.", + "authors": "Arnav Mhaske, Harshit Kedia, Sumanth Doddapaneni, Mitesh M. Khapra, Pratyush Kumar, Rudra Murthy V, Anoop Kunchukuttan", + "published": "2022-12-20", + "updated": "2023-05-28", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2104.05596v4", + "title": "Samanantar: The Largest Publicly Available Parallel Corpora Collection for 11 Indic Languages", + "abstract": "We present Samanantar, the largest publicly available parallel corpora\ncollection for Indic languages. The collection contains a total of 49.7 million\nsentence pairs between English and 11 Indic languages (from two language\nfamilies). Specifically, we compile 12.4 million sentence pairs from existing,\npublicly-available parallel corpora, and additionally mine 37.4 million\nsentence pairs from the web, resulting in a 4x increase. We mine the parallel\nsentences from the web by combining many corpora, tools, and methods: (a)\nweb-crawled monolingual corpora, (b) document OCR for extracting sentences from\nscanned documents, (c) multilingual representation models for aligning\nsentences, and (d) approximate nearest neighbor search for searching in a large\ncollection of sentences. Human evaluation of samples from the newly mined\ncorpora validate the high quality of the parallel sentences across 11\nlanguages. Further, we extract 83.4 million sentence pairs between all 55 Indic\nlanguage pairs from the English-centric parallel corpus using English as the\npivot language. We trained multilingual NMT models spanning all these languages\non Samanantar, which outperform existing models and baselines on publicly\navailable benchmarks, such as FLORES, establishing the utility of Samanantar.\nOur data and models are available publicly at\nhttps://ai4bharat.iitm.ac.in/samanantar and we hope they will help advance\nresearch in NMT and multilingual NLP for Indic languages.", + "authors": "Gowtham Ramesh, Sumanth Doddapaneni, Aravinth Bheemaraj, Mayank Jobanputra, Raghavan AK, Ajitesh Sharma, Sujit Sahoo, Harshita Diddee, Mahalakshmi J, Divyanshu Kakwani, Navneet Kumar, Aswin Pradeep, Srihari Nagaraj, Kumar Deepak, Vivek Raghavan, Anoop Kunchukuttan, Pratyush Kumar, Mitesh Shantadevi Khapra", + "published": "2021-04-12", + "updated": "2023-06-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2010.11856v3", + "title": "XOR QA: Cross-lingual Open-Retrieval Question Answering", + "abstract": "Multilingual question answering tasks typically assume answers exist in the\nsame language as the question. Yet in practice, many languages face both\ninformation scarcity -- where languages have few reference articles -- and\ninformation asymmetry -- where questions reference concepts from other\ncultures. 
This work extends open-retrieval question answering to a\ncross-lingual setting enabling questions from one language to be answered via\nanswer content from another language. We construct a large-scale dataset built\non questions from TyDi QA lacking same-language answers. Our task formulation,\ncalled Cross-lingual Open Retrieval Question Answering (XOR QA), includes 40k\ninformation-seeking questions from across 7 diverse non-English languages.\nBased on this dataset, we introduce three new tasks that involve cross-lingual\ndocument retrieval using multi-lingual and English resources. We establish\nbaselines with state-of-the-art machine translation systems and cross-lingual\npretrained models. Experimental results suggest that XOR QA is a challenging\ntask that will facilitate the development of novel techniques for multilingual\nquestion answering. Our data and code are available at\nhttps://nlp.cs.washington.edu/xorqa.", + "authors": "Akari Asai, Jungo Kasai, Jonathan H. Clark, Kenton Lee, Eunsol Choi, Hannaneh Hajishirzi", + "published": "2020-10-22", + "updated": "2021-04-13", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2305.11938v2", + "title": "XTREME-UP: A User-Centric Scarce-Data Benchmark for Under-Represented Languages", + "abstract": "Data scarcity is a crucial issue for the development of highly multilingual\nNLP systems. Yet for many under-represented languages (ULs) -- languages for\nwhich NLP re-search is particularly far behind in meeting user needs -- it is\nfeasible to annotate small amounts of data. Motivated by this, we propose\nXTREME-UP, a benchmark defined by: its focus on the scarce-data scenario rather\nthan zero-shot; its focus on user-centric tasks -- tasks with broad adoption by\nspeakers of high-resource languages; and its focus on under-represented\nlanguages where this scarce-data scenario tends to be most realistic. XTREME-UP\nevaluates the capabilities of language models across 88 under-represented\nlanguages over 9 key user-centric technologies including ASR, OCR, MT, and\ninformation access tasks that are of general utility. We create new datasets\nfor OCR, autocomplete, semantic parsing, and transliteration, and build on and\nrefine existing datasets for other tasks. XTREME-UP provides methodology for\nevaluating many modeling scenarios including text-only, multi-modal (vision,\naudio, and text),supervised parameter tuning, and in-context learning. We\nevaluate commonly used models on the benchmark. We release all code and scripts\nto train and evaluate models", + "authors": "Sebastian Ruder, Jonathan H. Clark, Alexander Gutkin, Mihir Kale, Min Ma, Massimo Nicosia, Shruti Rijhwani, Parker Riley, Jean-Michel A. Sarr, Xinyi Wang, John Wieting, Nitish Gupta, Anna Katanova, Christo Kirov, Dana L. Dickinson, Brian Roark, Bidisha Samanta, Connie Tao, David I. Adelani, Vera Axelrod, Isaac Caswell, Colin Cherry, Dan Garrette, Reeve Ingle, Melvin Johnson, Dmitry Panteleev, Partha Talukdar", + "published": "2023-05-19", + "updated": "2023-05-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2210.03057v1", + "title": "Language Models are Multilingual Chain-of-Thought Reasoners", + "abstract": "We evaluate the reasoning abilities of large language models in multilingual\nsettings. 
We introduce the Multilingual Grade School Math (MGSM) benchmark, by\nmanually translating 250 grade-school math problems from the GSM8K dataset\n(Cobbe et al., 2021) into ten typologically diverse languages. We find that the\nability to solve MGSM problems via chain-of-thought prompting emerges with\nincreasing model scale, and that models have strikingly strong multilingual\nreasoning abilities, even in underrepresented languages such as Bengali and\nSwahili. Finally, we show that the multilingual reasoning abilities of language\nmodels extend to other tasks such as commonsense reasoning and word-in-context\nsemantic judgment. The MGSM benchmark is publicly available at\nhttps://github.com/google-research/url-nlp.", + "authors": "Freda Shi, Mirac Suzgun, Markus Freitag, Xuezhi Wang, Suraj Srivats, Soroush Vosoughi, Hyung Won Chung, Yi Tay, Sebastian Ruder, Denny Zhou, Dipanjan Das, Jason Wei", + "published": "2022-10-06", + "updated": "2022-10-06", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2212.05409v3", + "title": "Towards Leaving No Indic Language Behind: Building Monolingual Corpora, Benchmark and Models for Indic Languages", + "abstract": "Building Natural Language Understanding (NLU) capabilities for Indic\nlanguages, which have a collective speaker base of more than one billion\nspeakers is absolutely crucial. In this work, we aim to improve the NLU\ncapabilities of Indic languages by making contributions along 3 important axes\n(i) monolingual corpora (ii) NLU testsets (iii) multilingual LLMs focusing on\nIndic languages. Specifically, we curate the largest monolingual corpora,\nIndicCorp, with 20.9B tokens covering 24 languages from 4 language families - a\n2.3x increase over prior work, while supporting 12 additional languages. Next,\nwe create a human-supervised benchmark, IndicXTREME, consisting of nine diverse\nNLU tasks covering 20 languages. Across languages and tasks, IndicXTREME\ncontains a total of 105 evaluation sets, of which 52 are new contributions to\nthe literature. To the best of our knowledge, this is the first effort towards\ncreating a standard benchmark for Indic languages that aims to test the\nmultilingual zero-shot capabilities of pretrained language models. Finally, we\ntrain IndicBERT v2, a state-of-the-art model supporting all the languages.\nAveraged across languages and tasks, the model achieves an absolute improvement\nof 2 points over a strong baseline. The data and models are available at\nhttps://github.com/AI4Bharat/IndicBERT.", + "authors": "Sumanth Doddapaneni, Rahul Aralikatte, Gowtham Ramesh, Shreya Goyal, Mitesh M. Khapra, Anoop Kunchukuttan, Pratyush Kumar", + "published": "2022-12-11", + "updated": "2023-05-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2005.00333v2", + "title": "XCOPA: A Multilingual Dataset for Causal Commonsense Reasoning", + "abstract": "In order to simulate human language capacity, natural language processing\nsystems must be able to reason about the dynamics of everyday situations,\nincluding their possible causes and effects. Moreover, they should be able to\ngeneralise the acquired world knowledge to new languages, modulo cultural\ndifferences. Advances in machine reasoning and cross-lingual transfer depend on\nthe availability of challenging evaluation benchmarks. 
Motivated by both\ndemands, we introduce Cross-lingual Choice of Plausible Alternatives (XCOPA), a\ntypologically diverse multilingual dataset for causal commonsense reasoning in\n11 languages, which includes resource-poor languages like Eastern Apur\\'imac\nQuechua and Haitian Creole. We evaluate a range of state-of-the-art models on\nthis novel dataset, revealing that the performance of current methods based on\nmultilingual pretraining and zero-shot fine-tuning falls short compared to\ntranslation-based transfer. Finally, we propose strategies to adapt\nmultilingual models to out-of-sample resource-lean languages where only a small\ncorpus or a bilingual dictionary is available, and report substantial\nimprovements over the random baseline. The XCOPA dataset is freely available at\ngithub.com/cambridgeltl/xcopa.", + "authors": "Edoardo Maria Ponti, Goran Glava\u0161, Olga Majewska, Qianchu Liu, Ivan Vuli\u0107, Anna Korhonen", + "published": "2020-05-01", + "updated": "2020-10-26", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2402.14208v2", + "title": "Content Conditional Debiasing for Fair Text Embedding", + "abstract": "Mitigating biases in machine learning models has gained increasing attention\nin Natural Language Processing (NLP). Yet, only a few studies focus on fair\ntext embeddings, which are crucial yet challenging for real-world applications.\nIn this paper, we propose a novel method for learning fair text embeddings. We\nachieve fairness while maintaining utility trade-off by ensuring conditional\nindependence between sensitive attributes and text embeddings conditioned on\nthe content. Specifically, we enforce that embeddings of texts with different\nsensitive attributes but identical content maintain the same distance toward\nthe embedding of their corresponding neutral text. Furthermore, we address the\nissue of lacking proper training data by using Large Language Models (LLMs) to\naugment texts into different sensitive groups. Our extensive evaluations\ndemonstrate that our approach effectively improves fairness while preserving\nthe utility of embeddings, representing a pioneering effort in achieving\nconditional independence for fair text embeddings.", + "authors": "Wenlong Deng, Blair Chen, Xiaoxiao Li, Christos Thrampoulidis", + "published": "2024-02-22", + "updated": "2024-02-23", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.CY", + "cs.LG" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.12150v1", + "title": "Your Large Language Model is Secretly a Fairness Proponent and You Should Prompt it Like One", + "abstract": "The widespread adoption of large language models (LLMs) underscores the\nurgent need to ensure their fairness. However, LLMs frequently present dominant\nviewpoints while ignoring alternative perspectives from minority parties,\nresulting in potential biases. We hypothesize that these fairness-violating\nbehaviors occur because LLMs express their viewpoints using a human personality\nthat represents the majority of training data. In response to this, we validate\nthat prompting LLMs with specific roles can allow LLMs to express diverse\nviewpoints. Building on this insight and observation, we develop FairThinking,\na pipeline designed to automatically generate roles that enable LLMs to\narticulate diverse perspectives for fair expressions. 
To evaluate FairThinking,\nwe create a dataset with a thousand items covering three fairness-related\ntopics and conduct experiments on GPT-3.5, GPT-4, Llama2, and Mistral to\ndemonstrate its superior performance.", + "authors": "Tianlin Li, Xiaoyu Zhang, Chao Du, Tianyu Pang, Qian Liu, Qing Guo, Chao Shen, Yang Liu", + "published": "2024-02-19", + "updated": "2024-02-19", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "I.2; J.4" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2309.03852v2", + "title": "FLM-101B: An Open LLM and How to Train It with $100K Budget", + "abstract": "Large language models (LLMs) have achieved remarkable success in NLP and\nmultimodal tasks, among others. Despite these successes, two main challenges\nremain in developing LLMs: (i) high computational cost, and (ii) fair and\nobjective evaluations. In this paper, we report a solution to significantly\nreduce LLM training cost through a growth strategy. We demonstrate that a\n101B-parameter LLM with 0.31T tokens can be trained with a budget of 100K US\ndollars. Inspired by IQ tests, we also consolidate an additional range of\nevaluations on top of existing evaluations that focus on knowledge-oriented\nabilities. These IQ evaluations include symbolic mapping, rule understanding,\npattern mining, and anti-interference. Such evaluations minimize the potential\nimpact of memorization. Experimental results show that our model, named\nFLM-101B, trained with a budget of 100K US dollars, achieves performance\ncomparable to powerful and well-known models, e.g., GPT-3 and GLM-130B,\nespecially on the additional range of IQ evaluations. The checkpoint of\nFLM-101B is released at https://huggingface.co/CofeAI/FLM-101B.", + "authors": "Xiang Li, Yiqun Yao, Xin Jiang, Xuezhi Fang, Xuying Meng, Siqi Fan, Peng Han, Jing Li, Li Du, Bowen Qin, Zheng Zhang, Aixin Sun, Yequan Wang", + "published": "2023-09-07", + "updated": "2023-09-17", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2308.05374v2", + "title": "Trustworthy LLMs: a Survey and Guideline for Evaluating Large Language Models' Alignment", + "abstract": "Ensuring alignment, which refers to making models behave in accordance with\nhuman intentions [1,2], has become a critical task before deploying large\nlanguage models (LLMs) in real-world applications. For instance, OpenAI devoted\nsix months to iteratively aligning GPT-4 before its release [3]. However, a\nmajor challenge faced by practitioners is the lack of clear guidance on\nevaluating whether LLM outputs align with social norms, values, and\nregulations. This obstacle hinders systematic iteration and deployment of LLMs.\nTo address this issue, this paper presents a comprehensive survey of key\ndimensions that are crucial to consider when assessing LLM trustworthiness. The\nsurvey covers seven major categories of LLM trustworthiness: reliability,\nsafety, fairness, resistance to misuse, explainability and reasoning, adherence\nto social norms, and robustness. Each major category is further divided into\nseveral sub-categories, resulting in a total of 29 sub-categories.\nAdditionally, a subset of 8 sub-categories is selected for further\ninvestigation, where corresponding measurement studies are designed and\nconducted on several widely-used LLMs. The measurement results indicate that,\nin general, more aligned models tend to perform better in terms of overall\ntrustworthiness. 
However, the effectiveness of alignment varies across the\ndifferent trustworthiness categories considered. This highlights the importance\nof conducting more fine-grained analyses, testing, and making continuous\nimprovements on LLM alignment. By shedding light on these key dimensions of LLM\ntrustworthiness, this paper aims to provide valuable insights and guidance to\npractitioners in the field. Understanding and addressing these concerns will be\ncrucial in achieving reliable and ethically sound deployment of LLMs in various\napplications.", + "authors": "Yang Liu, Yuanshun Yao, Jean-Francois Ton, Xiaoying Zhang, Ruocheng Guo, Hao Cheng, Yegor Klochkov, Muhammad Faaiz Taufiq, Hang Li", + "published": "2023-08-10", + "updated": "2024-03-21", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.LG" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2308.10149v2", + "title": "A Survey on Fairness in Large Language Models", + "abstract": "Large Language Models (LLMs) have shown powerful performance and development\nprospects and are widely deployed in the real world. However, LLMs can capture\nsocial biases from unprocessed training data and propagate the biases to\ndownstream tasks. Unfair LLM systems have undesirable social impacts and\npotential harms. In this paper, we provide a comprehensive review of related\nresearch on fairness in LLMs. Considering the influence of parameter magnitude\nand training paradigm on research strategy, we divide existing fairness\nresearch into oriented to medium-sized LLMs under pre-training and fine-tuning\nparadigms and oriented to large-sized LLMs under prompting paradigms. First,\nfor medium-sized LLMs, we introduce evaluation metrics and debiasing methods\nfrom the perspectives of intrinsic bias and extrinsic bias, respectively. Then,\nfor large-sized LLMs, we introduce recent fairness research, including fairness\nevaluation, reasons for bias, and debiasing methods. Finally, we discuss and\nprovide insight on the challenges and future directions for the development of\nfairness in LLMs.", + "authors": "Yingji Li, Mengnan Du, Rui Song, Xin Wang, Ying Wang", + "published": "2023-08-20", + "updated": "2024-02-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2403.04814v2", + "title": "Evaluation of LLMs on Syntax-Aware Code Fill-in-the-Middle Tasks", + "abstract": "We introduce Syntax-Aware Fill-In-the-Middle (SAFIM), a new benchmark for\nevaluating Large Language Models (LLMs) on the code Fill-in-the-Middle (FIM)\ntask. This benchmark focuses on syntax-aware completions of program structures\nsuch as code blocks and conditional expressions, and includes 17,720 examples\nfrom multiple programming languages, sourced from recent code submissions after\nApril 2022 to minimize data contamination. SAFIM provides a robust framework\nwith various prompt designs and novel syntax-aware post-processing techniques,\nfacilitating accurate and fair comparisons across LLMs. Our comprehensive\nevaluation of 15 LLMs shows that FIM pretraining not only enhances FIM\nproficiency but also improves Left-to-Right (L2R) inference using LLMs. Our\nfindings challenge conventional beliefs and suggest that pretraining methods\nand data quality have more impact than model size. SAFIM thus serves as a\nfoundational platform for future research in effective pretraining strategies\nfor code LLMs. 
The evaluation toolkit and dataset are available at\nhttps://github.com/gonglinyuan/safim, and the leaderboard is available at\nhttps://safimbenchmark.com.", + "authors": "Linyuan Gong, Sida Wang, Mostafa Elhoushi, Alvin Cheung", + "published": "2024-03-07", + "updated": "2024-04-10", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG", + "cs.SE" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2311.04205v2", + "title": "Rephrase and Respond: Let Large Language Models Ask Better Questions for Themselves", + "abstract": "Misunderstandings arise not only in interpersonal communication but also\nbetween humans and Large Language Models (LLMs). Such discrepancies can make\nLLMs interpret seemingly unambiguous questions in unexpected ways, yielding\nincorrect responses. While it is widely acknowledged that the quality of a\nprompt, such as a question, significantly impacts the quality of the response\nprovided by LLMs, a systematic method for crafting questions that LLMs can\nbetter comprehend is still underdeveloped. In this paper, we present a method\nnamed `Rephrase and Respond' (RaR), which allows LLMs to rephrase and expand\nquestions posed by humans and provide responses in a single prompt. This\napproach serves as a simple yet effective prompting method for improving\nperformance. We also introduce a two-step variant of RaR, where a rephrasing\nLLM first rephrases the question and then passes the original and rephrased\nquestions together to a different responding LLM. This facilitates the\neffective utilization of rephrased questions generated by one LLM with another.\nOur experiments demonstrate that our methods significantly improve the\nperformance of different models across a wide range to tasks. We further\nprovide a comprehensive comparison between RaR and the popular Chain-of-Thought\n(CoT) methods, both theoretically and empirically. We show that RaR is\ncomplementary to CoT and can be combined with CoT to achieve even better\nperformance. Our work not only contributes to enhancing LLM performance\nefficiently and effectively but also sheds light on a fair evaluation of LLM\ncapabilities. Data and codes are available at\nhttps://github.com/uclaml/Rephrase-and-Respond.", + "authors": "Yihe Deng, Weitong Zhang, Zixiang Chen, Quanquan Gu", + "published": "2023-11-07", + "updated": "2024-04-18", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.06852v2", + "title": "ChemLLM: A Chemical Large Language Model", + "abstract": "Large language models (LLMs) have made impressive progress in chemistry\napplications. However, the community lacks an LLM specifically designed for\nchemistry. The main challenges are two-fold: firstly, most chemical data and\nscientific knowledge are stored in structured databases, which limits the\nmodel's ability to sustain coherent dialogue when used directly. Secondly,\nthere is an absence of objective and fair benchmark that encompass most\nchemistry tasks. Here, we introduce ChemLLM, a comprehensive framework that\nfeatures the first LLM dedicated to chemistry. It also includes ChemData, a\ndataset specifically designed for instruction tuning, and ChemBench, a robust\nbenchmark covering nine essential chemistry tasks. ChemLLM is adept at\nperforming various tasks across chemical disciplines with fluid dialogue\ninteraction. 
Notably, ChemLLM achieves results comparable to GPT-4 on the core\nchemical tasks and demonstrates competitive performance with LLMs of similar\nsize in general scenarios. ChemLLM paves a new path for exploration in chemical\nstudies, and our method of incorporating structured chemical knowledge into\ndialogue systems sets a new standard for developing LLMs in various scientific\nfields. Codes, Datasets, and Model weights are publicly accessible at\nhttps://hf.co/AI4Chem", + "authors": "Di Zhang, Wei Liu, Qian Tan, Jingdan Chen, Hang Yan, Yuliang Yan, Jiatong Li, Weiran Huang, Xiangyu Yue, Wanli Ouyang, Dongzhan Zhou, Shufei Zhang, Mao Su, Han-Sen Zhong, Yuqiang Li", + "published": "2024-02-10", + "updated": "2024-04-25", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2305.19118v1", + "title": "Encouraging Divergent Thinking in Large Language Models through Multi-Agent Debate", + "abstract": "Modern large language models (LLMs) like ChatGPT have shown remarkable\nperformance on general language tasks but still struggle on complex reasoning\ntasks, which drives the research on cognitive behaviors of LLMs to explore\nhuman-like problem-solving strategies. Along this direction, one representative\nstrategy is self-reflection, which asks an LLM to refine the solution with the\nfeedback generated by itself iteratively. However, our study shows that such\nreflection-style methods suffer from the Degeneration-of-Thought (DoT) problem:\nonce the LLM has established confidence in its solutions, it is unable to\ngenerate novel thoughts later through reflection even if its initial stance is\nincorrect. To address the DoT problem, we propose a Multi-Agent Debate (MAD)\nframework, in which multiple agents express their arguments in the state of\n\"tit for tat\" and a judge manages the debate process to obtain a final\nsolution. Clearly, our MAD framework encourages divergent thinking in LLMs\nwhich would be helpful for tasks that require deep levels of contemplation.\nExperiment results on two challenging datasets, commonsense machine translation\nand counter-intuitive arithmetic reasoning, demonstrate the effectiveness of\nour MAD framework. Extensive analyses suggest that the adaptive break of debate\nand the modest level of \"tit for tat\" state are required for MAD to obtain good\nperformance. Moreover, we find that LLMs might not be a fair judge if different\nLLMs are used for agents. Codes:\nhttps://github.com/Skytliang/Multi-Agents-Debate", + "authors": "Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Zhaopeng Tu, Shuming Shi", + "published": "2023-05-30", + "updated": "2023-05-30", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2310.08780v1", + "title": "\"Im not Racist but...\": Discovering Bias in the Internal Knowledge of Large Language Models", + "abstract": "Large language models (LLMs) have garnered significant attention for their\nremarkable performance in a continuously expanding set of natural language\nprocessing tasks. However, these models have been shown to harbor inherent\nsocietal biases, or stereotypes, which can adversely affect their performance\nin their many downstream applications. In this paper, we introduce a novel,\npurely prompt-based approach to uncover hidden stereotypes within any arbitrary\nLLM. 
Our approach dynamically generates a knowledge representation of internal\nstereotypes, enabling the identification of biases encoded within the LLM's\ninternal knowledge. By illuminating the biases present in LLMs and offering a\nsystematic methodology for their analysis, our work contributes to advancing\ntransparency and promoting fairness in natural language processing systems.", + "authors": "Abel Salinas, Louis Penafiel, Robert McCormack, Fred Morstatter", + "published": "2023-10-13", + "updated": "2023-10-13", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.19465v1", + "title": "Towards Tracing Trustworthiness Dynamics: Revisiting Pre-training Period of Large Language Models", + "abstract": "Ensuring the trustworthiness of large language models (LLMs) is crucial. Most\nstudies concentrate on fully pre-trained LLMs to better understand and improve\nLLMs' trustworthiness. In this paper, to reveal the untapped potential of\npre-training, we pioneer the exploration of LLMs' trustworthiness during this\nperiod, focusing on five key dimensions: reliability, privacy, toxicity,\nfairness, and robustness. To begin with, we apply linear probing to LLMs. The\nhigh probing accuracy suggests that \\textit{LLMs in early pre-training can\nalready distinguish concepts in each trustworthiness dimension}. Therefore, to\nfurther uncover the hidden possibilities of pre-training, we extract steering\nvectors from a LLM's pre-training checkpoints to enhance the LLM's\ntrustworthiness. Finally, inspired by~\\citet{choi2023understanding} that mutual\ninformation estimation is bounded by linear probing accuracy, we also probe\nLLMs with mutual information to investigate the dynamics of trustworthiness\nduring pre-training. We are the first to observe a similar two-phase\nphenomenon: fitting and compression~\\citep{shwartz2017opening}. This research\nprovides an initial exploration of trustworthiness modeling during LLM\npre-training, seeking to unveil new insights and spur further developments in\nthe field. We will make our code publicly accessible at\n\\url{https://github.com/ChnQ/TracingLLM}.", + "authors": "Chen Qian, Jie Zhang, Wei Yao, Dongrui Liu, Zhenfei Yin, Yu Qiao, Yong Liu, Jing Shao", + "published": "2024-02-29", + "updated": "2024-02-29", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2310.18130v2", + "title": "DELPHI: Data for Evaluating LLMs' Performance in Handling Controversial Issues", + "abstract": "Controversy is a reflection of our zeitgeist, and an important aspect to any\ndiscourse. The rise of large language models (LLMs) as conversational systems\nhas increased public reliance on these systems for answers to their various\nquestions. Consequently, it is crucial to systematically examine how these\nmodels respond to questions that pertaining to ongoing debates. However, few\nsuch datasets exist in providing human-annotated labels reflecting the\ncontemporary discussions. To foster research in this area, we propose a novel\nconstruction of a controversial questions dataset, expanding upon the publicly\nreleased Quora Question Pairs Dataset. This dataset presents challenges\nconcerning knowledge recency, safety, fairness, and bias. We evaluate different\nLLMs using a subset of this dataset, illuminating how they handle controversial\nissues and the stances they adopt. 
This research ultimately contributes to our\nunderstanding of LLMs' interaction with controversial issues, paving the way\nfor improvements in their comprehension and handling of complex societal\ndebates.", + "authors": "David Q. Sun, Artem Abzaliev, Hadas Kotek, Zidi Xiu, Christopher Klein, Jason D. Williams", + "published": "2023-10-27", + "updated": "2023-11-07", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.HC" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2401.04057v1", + "title": "Unveiling Bias in Fairness Evaluations of Large Language Models: A Critical Literature Review of Music and Movie Recommendation Systems", + "abstract": "The rise of generative artificial intelligence, particularly Large Language\nModels (LLMs), has intensified the imperative to scrutinize fairness alongside\naccuracy. Recent studies have begun to investigate fairness evaluations for\nLLMs within domains such as recommendations. Given that personalization is an\nintrinsic aspect of recommendation systems, its incorporation into fairness\nassessments is paramount. Yet, the degree to which current fairness evaluation\nframeworks account for personalization remains unclear. Our comprehensive\nliterature review aims to fill this gap by examining how existing frameworks\nhandle fairness evaluations of LLMs, with a focus on the integration of\npersonalization factors. Despite an exhaustive collection and analysis of\nrelevant works, we discovered that most evaluations overlook personalization, a\ncritical facet of recommendation systems, thereby inadvertently perpetuating\nunfair practices. Our findings shed light on this oversight and underscore the\nurgent need for more nuanced fairness evaluations that acknowledge\npersonalization. Such improvements are vital for fostering equitable\ndevelopment within the AI community.", + "authors": "Chandan Kumar Sah, Dr. Lian Xiaoli, Muhammad Mirajul Islam", + "published": "2024-01-08", + "updated": "2024-01-08", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR", + "cs.AI", + "cs.SE" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2401.00588v1", + "title": "Fairness in Serving Large Language Models", + "abstract": "High-demand LLM inference services (e.g., ChatGPT and BARD) support a wide\nrange of requests from short chat conversations to long document reading. To\nensure that all client requests are processed fairly, most major LLM inference\nservices have request rate limits, to ensure that no client can dominate the\nrequest queue. However, this rudimentary notion of fairness also results in\nunder-utilization of the resources and poor client experience when there is\nspare capacity. While there is a rich literature on fair scheduling, serving\nLLMs presents new challenges due to their unpredictable request lengths and\ntheir unique batching characteristics on parallel accelerators. This paper\nintroduces the definition of LLM serving fairness based on a cost function that\naccounts for the number of input and output tokens processed. To achieve\nfairness in serving, we propose a novel scheduling algorithm, the Virtual Token\nCounter (VTC), a fair scheduler based on the continuous batching mechanism. We\nprove a 2x tight upper bound on the service difference between two backlogged\nclients, adhering to the requirement of work-conserving. 
Through extensive\nexperiments, we demonstrate the superior performance of VTC in ensuring\nfairness, especially in contrast to other baseline methods, which exhibit\nshortcomings under various conditions.", + "authors": "Ying Sheng, Shiyi Cao, Dacheng Li, Banghua Zhu, Zhuohan Li, Danyang Zhuo, Joseph E. Gonzalez, Ion Stoica", + "published": "2023-12-31", + "updated": "2023-12-31", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.LG", + "cs.PF" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2405.02219v1", + "title": "FairEvalLLM. A Comprehensive Framework for Benchmarking Fairness in Large Language Model Recommender Systems", + "abstract": "This paper presents a framework for evaluating fairness in recommender\nsystems powered by Large Language Models (RecLLMs), addressing the need for a\nunified approach that spans various fairness dimensions including sensitivity\nto user attributes, intrinsic fairness, and discussions of fairness based on\nunderlying benefits. In addition, our framework introduces counterfactual\nevaluations and integrates diverse user group considerations to enhance the\ndiscourse on fairness evaluation for RecLLMs.\n Our key contributions include the development of a robust framework for\nfairness evaluation in LLM-based recommendations and a structured method to\ncreate \\textit{informative user profiles} from demographic data, historical\nuser preferences, and recent interactions. We argue that the latter is\nessential for enhancing personalization in such systems, especially in\ntemporal-driven scenarios. We demonstrate the utility of our framework through\npractical applications on two datasets, LastFM-1K and ML-1M. We conduct\nexperiments on a subsample of 80 users from each dataset, testing and assessing\nthe effectiveness of various prompt construction scenarios and in-context\nlearning, comprising more than 50 scenarios. This results in more than 4000\nrecommendations (80 * 50 = 4000). Our study reveals that while there are no\nsignificant unfairness issues in scenarios involving sensitive attributes, some\nconcerns remain. However, in terms of intrinsic fairness, which does not\ninvolve direct sensitivity, unfairness across demographic groups remains\nsignificant. The code and data used for this paper are available at:\n\\url{https://shorturl.at/awBFM}.", + "authors": "Yashar Deldjoo", + "published": "2024-05-03", + "updated": "2024-05-03", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2403.02839v1", + "title": "An Empirical Study of LLM-as-a-Judge for LLM Evaluation: Fine-tuned Judge Models are Task-specific Classifiers", + "abstract": "Recently, there has been a growing trend of utilizing Large Language Model\n(LLM) to evaluate the quality of other LLMs. Many studies have employed\nproprietary close-source models, especially GPT4, as the evaluator.\nAlternatively, other works have fine-tuned judge models based on open-source\nLLMs as the evaluator. In this study, we conduct an empirical study of\ndifferent judge models on their evaluation capability. 
Our findings indicate\nthat although the fine-tuned judge models achieve high accuracy on in-domain\ntest sets, even surpassing GPT4, they are inherently task-specific classifiers,\nand their generalizability and fairness severely underperform GPT4.", + "authors": "Hui Huang, Yingqi Qu, Jing Liu, Muyun Yang, Tiejun Zhao", + "published": "2024-03-05", + "updated": "2024-03-05", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2311.04892v2", + "title": "Bias Runs Deep: Implicit Reasoning Biases in Persona-Assigned LLMs", + "abstract": "Recent works have showcased the ability of LLMs to embody diverse personas in\ntheir responses, exemplified by prompts like 'You are Yoda. Explain the Theory\nof Relativity.' While this ability allows personalization of LLMs and enables\nhuman behavior simulation, its effect on LLMs' capabilities remains unclear. To\nfill this gap, we present the first extensive study of the unintended\nside-effects of persona assignment on the ability of LLMs to perform basic\nreasoning tasks. Our study covers 24 reasoning datasets, 4 LLMs, and 19 diverse\npersonas (e.g. an Asian person) spanning 5 socio-demographic groups. Our\nexperiments unveil that LLMs harbor deep rooted bias against various\nsocio-demographics underneath a veneer of fairness. While they overtly reject\nstereotypes when explicitly asked ('Are Black people less skilled at\nmathematics?'), they manifest stereotypical and erroneous presumptions when\nasked to answer questions while adopting a persona. These can be observed as\nabstentions in responses, e.g., 'As a Black person, I can't answer this\nquestion as it requires math knowledge', and generally result in a substantial\nperformance drop. Our experiments with ChatGPT-3.5 show that this bias is\nubiquitous - 80% of our personas demonstrate bias; it is significant - some\ndatasets show performance drops of 70%+; and can be especially harmful for\ncertain groups - some personas suffer statistically significant drops on 80%+\nof the datasets. Overall, all 4 LLMs exhibit this bias to varying extents, with\nGPT-4-Turbo showing the least but still a problematic amount of bias (evident\nin 42% of the personas). Further analysis shows that these persona-induced\nerrors can be hard-to-discern and hard-to-avoid. Our findings serve as a\ncautionary tale that the practice of assigning personas to LLMs - a trend on\nthe rise - can surface their deep-rooted biases and have unforeseeable and\ndetrimental side-effects.", + "authors": "Shashank Gupta, Vaishnavi Shrivastava, Ameet Deshpande, Ashwin Kalyan, Peter Clark, Ashish Sabharwal, Tushar Khot", + "published": "2023-11-08", + "updated": "2024-01-27", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2404.07981v1", + "title": "Manipulating Large Language Models to Increase Product Visibility", + "abstract": "Large language models (LLMs) are increasingly being integrated into search\nengines to provide natural language responses tailored to user queries.\nCustomers and end-users are also becoming more dependent on these models for\nquick and easy purchase decisions. 
In this work, we investigate whether\nrecommendations from LLMs can be manipulated to enhance a product's visibility.\nWe demonstrate that adding a strategic text sequence (STS) -- a carefully\ncrafted message -- to a product's information page can significantly increase\nits likelihood of being listed as the LLM's top recommendation. To understand\nthe impact of STS, we use a catalog of fictitious coffee machines and analyze\nits effect on two target products: one that seldom appears in the LLM's\nrecommendations and another that usually ranks second. We observe that the\nstrategic text sequence significantly enhances the visibility of both products\nby increasing their chances of appearing as the top recommendation. This\nability to manipulate LLM-generated search responses provides vendors with a\nconsiderable competitive advantage and has the potential to disrupt fair market\ncompetition. Just as search engine optimization (SEO) revolutionized how\nwebpages are customized to rank higher in search engine results, influencing\nLLM recommendations could profoundly impact content optimization for AI-driven\nsearch services. Code for our experiments is available at\nhttps://github.com/aounon/llm-rank-optimizer.", + "authors": "Aounon Kumar, Himabindu Lakkaraju", + "published": "2024-04-11", + "updated": "2024-04-11", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR", + "cs.AI", + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2305.12090v1", + "title": "UP5: Unbiased Foundation Model for Fairness-aware Recommendation", + "abstract": "Recent advancements in foundation models such as large language models (LLM)\nhave propelled them to the forefront of recommender systems (RS). Moreover,\nfairness in RS is critical since many users apply it for decision-making and\ndemand fulfillment. However, at present, there is a lack of understanding\nregarding the level of fairness exhibited by recommendation foundation models\nand the appropriate methods for equitably treating different groups of users in\nfoundation models. In this paper, we focus on user-side unfairness problem and\nshow through a thorough examination that there is unfairness involved in LLMs\nthat lead to unfair recommendation results. To eliminate bias from LLM for\nfairness-aware recommendation, we introduce a novel Unbiased P5 (UP5)\nfoundation model based on Counterfactually-Fair-Prompting (CFP) techniques. CFP\nincludes two sub-modules: a personalized prefix prompt that enhances fairness\nwith respect to individual sensitive attributes, and a Prompt Mixture that\nintegrates multiple counterfactually-fair prompts for a set of sensitive\nattributes. Experiments are conducted on two real-world datasets, MovieLens-1M\nand Insurance, and results are compared with both matching-based and\nsequential-based fairness-aware recommendation models. The results show that\nUP5 achieves better recommendation performance and meanwhile exhibits a high\nlevel of fairness.", + "authors": "Wenyue Hua, Yingqiang Ge, Shuyuan Xu, Jianchao Ji, Yongfeng Zhang", + "published": "2023-05-20", + "updated": "2023-05-20", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR", + "cs.AI", + "cs.CL", + "cs.LG" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2404.08656v1", + "title": "Linear Cross-document Event Coreference Resolution with X-AMR", + "abstract": "Event Coreference Resolution (ECR) as a pairwise mention classification task\nis expensive both for automated systems and manual annotations. 
The task's\nquadratic difficulty is exacerbated when using Large Language Models (LLMs),\nmaking prompt engineering for ECR prohibitively costly. In this work, we\npropose a graphical representation of events, X-AMR, anchored around individual\nmentions using a \\textbf{cross}-document version of \\textbf{A}bstract\n\\textbf{M}eaning \\textbf{R}epresentation. We then linearize the ECR with a\nnovel multi-hop coreference algorithm over the event graphs. The event graphs\nsimplify ECR, making it a) LLM cost-effective, b) compositional and\ninterpretable, and c) easily annotated. For a fair assessment, we first enrich\nan existing ECR benchmark dataset with these event graphs using an\nannotator-friendly tool we introduce. Then, we employ GPT-4, the newest LLM by\nOpenAI, for these annotations. Finally, using the ECR algorithm, we assess\nGPT-4 against humans and analyze its limitations. Through this research, we aim\nto advance the state-of-the-art for efficient ECR and shed light on the\npotential shortcomings of current LLMs at this task. Code and annotations:\n\\url{https://github.com/ahmeshaf/gpt_coref}", + "authors": "Shafiuddin Rehan Ahmed, George Arthur Baker, Evi Judge, Michael Regan, Kristin Wright-Bettner, Martha Palmer, James H. Martin", + "published": "2024-03-25", + "updated": "2024-03-25", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2311.02294v1", + "title": "LLMs grasp morality in concept", + "abstract": "Work in AI ethics and fairness has made much progress in regulating LLMs to\nreflect certain values, such as fairness, truth, and diversity. However, it has\ntaken the problem of how LLMs might 'mean' anything at all for granted. Without\naddressing this, it is not clear what imbuing LLMs with such values even means.\nIn response, we provide a general theory of meaning that extends beyond humans.\nWe use this theory to explicate the precise nature of LLMs as meaning-agents.\nWe suggest that the LLM, by virtue of its position as a meaning-agent, already\ngrasps the constructions of human society (e.g. morality, gender, and race) in\nconcept. Consequently, under certain ethical frameworks, currently popular\nmethods for model alignment are limited at best and counterproductive at worst.\nMoreover, unaligned models may help us better develop our moral and social\nphilosophy.", + "authors": "Mark Pock, Andre Ye, Jared Moore", + "published": "2023-11-04", + "updated": "2023-11-04", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.CY" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2305.01937v1", + "title": "Can Large Language Models Be an Alternative to Human Evaluations?", + "abstract": "Human evaluation is indispensable and inevitable for assessing the quality of\ntexts generated by machine learning models or written by humans. However, human\nevaluation is very difficult to reproduce and its quality is notoriously\nunstable, hindering fair comparisons among different natural language\nprocessing (NLP) models and algorithms. Recently, large language models (LLMs)\nhave demonstrated exceptional performance on unseen tasks when only the task\ninstructions are provided. In this paper, we explore if such an ability of the\nLLMs can be used as an alternative to human evaluation. 
We present the LLMs\nwith the exact same instructions, samples to be evaluated, and questions used\nto conduct human evaluation, and then ask the LLMs to generate responses to\nthose questions; we dub this LLM evaluation. We use human evaluation and LLM\nevaluation to evaluate the texts in two NLP tasks: open-ended story generation\nand adversarial attacks. We show that the result of LLM evaluation is\nconsistent with the results obtained by expert human evaluation: the texts\nrated higher by human experts are also rated higher by the LLMs. We also find\nthat the results of LLM evaluation are stable over different formatting of the\ntask instructions and the sampling algorithm used to generate the answer. We\nare the first to show the potential of using LLMs to assess the quality of\ntexts and discuss the limitations and ethical considerations of LLM evaluation.", + "authors": "Cheng-Han Chiang, Hung-yi Lee", + "published": "2023-05-03", + "updated": "2023-05-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.HC" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.04489v1", + "title": "De-amplifying Bias from Differential Privacy in Language Model Fine-tuning", + "abstract": "Fairness and privacy are two important values machine learning (ML)\npractitioners often seek to operationalize in models. Fairness aims to reduce\nmodel bias for social/demographic sub-groups. Privacy via differential privacy\n(DP) mechanisms, on the other hand, limits the impact of any individual's\ntraining data on the resulting model. The trade-offs between privacy and\nfairness goals of trustworthy ML pose a challenge to those wishing to address\nboth. We show that DP amplifies gender, racial, and religious bias when\nfine-tuning large language models (LLMs), producing models more biased than\nones fine-tuned without DP. We find the cause of the amplification to be a\ndisparity in convergence of gradients across sub-groups. Through the case of\nbinary gender bias, we demonstrate that Counterfactual Data Augmentation (CDA),\na known method for addressing bias, also mitigates bias amplification by DP. As\na consequence, DP and CDA together can be used to fine-tune models while\nmaintaining both fairness and privacy.", + "authors": "Sanjari Srivastava, Piotr Mardziel, Zhikhun Zhang, Archana Ahlawat, Anupam Datta, John C Mitchell", + "published": "2024-02-07", + "updated": "2024-02-07", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CR", + "cs.CY", + "stat.ME" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2403.17553v1", + "title": "RuBia: A Russian Language Bias Detection Dataset", + "abstract": "Warning: this work contains upsetting or disturbing content.\n Large language models (LLMs) tend to learn the social and cultural biases\npresent in the raw pre-training data. To test if an LLM's behavior is fair,\nfunctional datasets are employed, and due to their purpose, these datasets are\nhighly language and culture-specific. In this paper, we address a gap in the\nscope of multilingual bias evaluation by presenting a bias detection dataset\nspecifically designed for the Russian language, dubbed as RuBia. The RuBia\ndataset is divided into 4 domains: gender, nationality, socio-economic status,\nand diverse, each of the domains is further divided into multiple fine-grained\nsubdomains. Every example in the dataset consists of two sentences with the\nfirst reinforcing a potentially harmful stereotype or trope and the second\ncontradicting it. 
These sentence pairs were first written by volunteers and\nthen validated by native-speaking crowdsourcing workers. Overall, there are\nnearly 2,000 unique sentence pairs spread over 19 subdomains in RuBia. To\nillustrate the dataset's purpose, we conduct a diagnostic evaluation of\nstate-of-the-art or near-state-of-the-art LLMs and discuss the LLMs'\npredisposition to social biases.", + "authors": "Veronika Grigoreva, Anastasiia Ivanova, Ilseyar Alimova, Ekaterina Artemova", + "published": "2024-03-26", + "updated": "2024-03-26", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2305.18569v1", + "title": "Fairness of ChatGPT", + "abstract": "Understanding and addressing unfairness in LLMs are crucial for responsible\nAI deployment. However, there is a limited availability of quantitative\nanalyses and in-depth studies regarding fairness evaluations in LLMs,\nespecially when applying LLMs to high-stakes fields. This work aims to fill\nthis gap by providing a systematic evaluation of the effectiveness and fairness\nof LLMs using ChatGPT as a study case. We focus on assessing ChatGPT's\nperformance in high-takes fields including education, criminology, finance and\nhealthcare. To make thorough evaluation, we consider both group fairness and\nindividual fairness and we also observe the disparities in ChatGPT's outputs\nunder a set of biased or unbiased prompts. This work contributes to a deeper\nunderstanding of LLMs' fairness performance, facilitates bias mitigation and\nfosters the development of responsible artificial intelligence systems.", + "authors": "Yunqi Li, Yongfeng Zhang", + "published": "2023-05-22", + "updated": "2023-05-22", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CL", + "cs.CY" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2309.11653v2", + "title": "\"It's a Fair Game\", or Is It? Examining How Users Navigate Disclosure Risks and Benefits When Using LLM-Based Conversational Agents", + "abstract": "The widespread use of Large Language Model (LLM)-based conversational agents\n(CAs), especially in high-stakes domains, raises many privacy concerns.\nBuilding ethical LLM-based CAs that respect user privacy requires an in-depth\nunderstanding of the privacy risks that concern users the most. However,\nexisting research, primarily model-centered, does not provide insight into\nusers' perspectives. To bridge this gap, we analyzed sensitive disclosures in\nreal-world ChatGPT conversations and conducted semi-structured interviews with\n19 LLM-based CA users. We found that users are constantly faced with trade-offs\nbetween privacy, utility, and convenience when using LLM-based CAs. However,\nusers' erroneous mental models and the dark patterns in system design limited\ntheir awareness and comprehension of the privacy risks. Additionally, the\nhuman-like interactions encouraged more sensitive disclosures, which\ncomplicated users' ability to navigate the trade-offs. 
We discuss practical\ndesign guidelines and the needs for paradigm shifts to protect the privacy of\nLLM-based CA users.", + "authors": "Zhiping Zhang, Michelle Jia, Hao-Ping Lee, Bingsheng Yao, Sauvik Das, Ada Lerner, Dakuo Wang, Tianshi Li", + "published": "2023-09-20", + "updated": "2024-04-02", + "primary_cat": "cs.HC", + "cats": [ + "cs.HC", + "cs.AI", + "cs.CR" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2404.12736v1", + "title": "Large Language Model Supply Chain: A Research Agenda", + "abstract": "The rapid advancements in pre-trained Large Language Models (LLMs) and Large\nMultimodal Models (LMMs) have ushered in a new era of intelligent applications,\ntransforming fields ranging from natural language processing to content\ngeneration. The LLM supply chain represents a crucial aspect of the\ncontemporary artificial intelligence landscape. It encompasses the entire\nlifecycle of pre-trained models, from its initial development and training to\nits final deployment and application in various domains. This paper presents a\ncomprehensive overview of the LLM supply chain, highlighting its three core\nelements: 1) the model infrastructure, encompassing datasets and toolchain for\ntraining, optimization, and deployment; 2) the model lifecycle, covering\ntraining, testing, releasing, and ongoing maintenance; and 3) the downstream\napplication ecosystem, enabling the integration of pre-trained models into a\nwide range of intelligent applications. However, this rapidly evolving field\nfaces numerous challenges across these key components, including data privacy\nand security, model interpretability and fairness, infrastructure scalability,\nand regulatory compliance. Addressing these challenges is essential for\nharnessing the full potential of LLMs and ensuring their ethical and\nresponsible use. This paper provides a future research agenda for the LLM\nsupply chain, aiming at driving the continued advancement and responsible\ndeployment of these transformative LLMs.", + "authors": "Shenao Wang, Yanjie Zhao, Xinyi Hou, Haoyu Wang", + "published": "2024-04-19", + "updated": "2024-04-19", + "primary_cat": "cs.SE", + "cats": [ + "cs.SE" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2312.14769v3", + "title": "Large Language Model (LLM) Bias Index -- LLMBI", + "abstract": "The Large Language Model Bias Index (LLMBI) is a pioneering approach designed\nto quantify and address biases inherent in large language models (LLMs), such\nas GPT-4. We recognise the increasing prevalence and impact of LLMs across\ndiverse sectors. This research introduces a novel metric, LLMBI, to\nsystematically measure and mitigate biases potentially skewing model responses.\nWe formulated LLMBI using a composite scoring system incorporating multiple\ndimensions of bias, including but not limited to age, gender, and racial\nbiases. To operationalise this metric, we engaged in a multi-step process\ninvolving collecting and annotating LLM responses, applying sophisticated\nNatural Language Processing (NLP) techniques for bias detection, and computing\nthe LLMBI score through a specially crafted mathematical formula. The formula\nintegrates weighted averages of various bias dimensions, a penalty for dataset\ndiversity deficiencies, and a correction for sentiment biases. Our empirical\nanalysis, conducted using responses from OpenAI's API, employs advanced\nsentiment analysis as a representative method for bias detection. 
The research\nreveals LLMs, whilst demonstrating impressive capabilities in text generation,\nexhibit varying degrees of bias across different dimensions. LLMBI provides a\nquantifiable measure to compare biases across models and over time, offering a\nvital tool for systems engineers, researchers and regulators in enhancing the\nfairness and reliability of LLMs. It highlights the potential of LLMs in\nmimicking unbiased human-like responses. Additionally, it underscores the\nnecessity of continuously monitoring and recalibrating such models to align\nwith evolving societal norms and ethical standards.", + "authors": "Abiodun Finbarrs Oketunji, Muhammad Anas, Deepthi Saina", + "published": "2023-12-22", + "updated": "2023-12-29", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.CY", + "cs.LG", + "I.2.7" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.11406v2", + "title": "Don't Go To Extremes: Revealing the Excessive Sensitivity and Calibration Limitations of LLMs in Implicit Hate Speech Detection", + "abstract": "The fairness and trustworthiness of Large Language Models (LLMs) are\nreceiving increasing attention. Implicit hate speech, which employs indirect\nlanguage to convey hateful intentions, occupies a significant portion of\npractice. However, the extent to which LLMs effectively address this issue\nremains insufficiently examined. This paper delves into the capability of LLMs\nto detect implicit hate speech (Classification Task) and express confidence in\ntheir responses (Calibration Task). Our evaluation meticulously considers\nvarious prompt patterns and mainstream uncertainty estimation methods. Our\nfindings highlight that LLMs exhibit two extremes: (1) LLMs display excessive\nsensitivity towards groups or topics that may cause fairness issues, resulting\nin misclassifying benign statements as hate speech. (2) LLMs' confidence scores\nfor each method excessively concentrate on a fixed range, remaining unchanged\nregardless of the dataset's complexity. Consequently, the calibration\nperformance is heavily reliant on primary classification accuracy. These\ndiscoveries unveil new limitations of LLMs, underscoring the need for caution\nwhen optimizing models to ensure they do not veer towards extremes. This serves\nas a reminder to carefully consider sensitivity and confidence in the pursuit\nof model fairness.", + "authors": "Min Zhang, Jianfeng He, Taoran Ji, Chang-Tien Lu", + "published": "2024-02-18", + "updated": "2024-02-26", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2312.14804v1", + "title": "Use large language models to promote equity", + "abstract": "Advances in large language models (LLMs) have driven an explosion of interest\nabout their societal impacts. Much of the discourse around how they will impact\nsocial equity has been cautionary or negative, focusing on questions like \"how\nmight LLMs be biased and how would we mitigate those biases?\" This is a vital\ndiscussion: the ways in which AI generally, and LLMs specifically, can entrench\nbiases have been well-documented. But equally vital, and much less discussed,\nis the more opportunity-focused counterpoint: \"what promising applications do\nLLMs enable that could promote equity?\" If LLMs are to enable a more equitable\nworld, it is not enough just to play defense against their biases and failure\nmodes. 
We must also go on offense, applying them positively to equity-enhancing\nuse cases to increase opportunities for underserved groups and reduce societal\ndiscrimination. There are many choices which determine the impact of AI, and a\nfundamental choice very early in the pipeline is the problems we choose to\napply it to. If we focus only later in the pipeline -- making LLMs marginally\nmore fair as they facilitate use cases which intrinsically entrench power -- we\nwill miss an important opportunity to guide them to equitable impacts. Here, we\nhighlight the emerging potential of LLMs to promote equity by presenting four\nnewly possible, promising research directions, while keeping risks and\ncautionary points in clear view.", + "authors": "Emma Pierson, Divya Shanmugam, Rajiv Movva, Jon Kleinberg, Monica Agrawal, Mark Dredze, Kadija Ferryman, Judy Wawira Gichoya, Dan Jurafsky, Pang Wei Koh, Karen Levy, Sendhil Mullainathan, Ziad Obermeyer, Harini Suresh, Keyon Vafa", + "published": "2023-12-22", + "updated": "2023-12-22", + "primary_cat": "cs.CY", + "cats": [ + "cs.CY" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2401.15585v1", + "title": "Evaluating Gender Bias in Large Language Models via Chain-of-Thought Prompting", + "abstract": "There exist both scalable tasks, like reading comprehension and\nfact-checking, where model performance improves with model size, and unscalable\ntasks, like arithmetic reasoning and symbolic reasoning, where model\nperformance does not necessarily improve with model size. Large language models\n(LLMs) equipped with Chain-of-Thought (CoT) prompting are able to make accurate\nincremental predictions even on unscalable tasks. Unfortunately, despite their\nexceptional reasoning abilities, LLMs tend to internalize and reproduce\ndiscriminatory societal biases. Whether CoT can provide discriminatory or\negalitarian rationalizations for the implicit information in unscalable tasks\nremains an open question.\n In this study, we examine the impact of LLMs' step-by-step predictions on\ngender bias in unscalable tasks. For this purpose, we construct a benchmark for\nan unscalable task where the LLM is given a list of words comprising feminine,\nmasculine, and gendered occupational words, and is required to count the number\nof feminine and masculine words. In our CoT prompts, we require the LLM to\nexplicitly indicate whether each word in the word list is a feminine or\nmasculine before making the final predictions. With counting and handling the\nmeaning of words, this benchmark has characteristics of both arithmetic\nreasoning and symbolic reasoning. Experimental results in English show that\nwithout step-by-step prediction, most LLMs make socially biased predictions,\ndespite the task being as simple as counting words. Interestingly, CoT\nprompting reduces this unconscious social bias in LLMs and encourages fair\npredictions.", + "authors": "Masahiro Kaneko, Danushka Bollegala, Naoaki Okazaki, Timothy Baldwin", + "published": "2024-01-28", + "updated": "2024-01-28", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2404.13925v1", + "title": "MARIO Eval: Evaluate Your Math LLM with your Math LLM--A mathematical dataset evaluation toolkit", + "abstract": "Large language models (LLMs) have been explored in a variety of reasoning\ntasks including solving of mathematical problems. 
Each math dataset typically\nincludes its own specially designed evaluation script, which, while suitable\nfor its intended use, lacks generalizability across different datasets.\nConsequently, updates and adaptations to these evaluation tools tend to occur\nwithout being systematically reported, leading to inconsistencies and obstacles\nto fair comparison across studies. To bridge this gap, we introduce a\ncomprehensive mathematical evaluation toolkit that not only utilizes a python\ncomputer algebra system (CAS) for its numerical accuracy, but also integrates\nan optional LLM, known for its considerable natural language processing\ncapabilities. To validate the effectiveness of our toolkit, we manually\nannotated two distinct datasets. Our experiments demonstrate that the toolkit\nyields more robust evaluation results compared to prior works, even without an\nLLM. Furthermore, when an LLM is incorporated, there is a notable enhancement.\nThe code for our method will be made available at\n\\url{https://github.com/MARIO-Math-Reasoning/math_evaluation}.", + "authors": "Boning Zhang, Chengxi Li, Kai Fan", + "published": "2024-04-22", + "updated": "2024-04-22", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2305.13862v2", + "title": "A Trip Towards Fairness: Bias and De-Biasing in Large Language Models", + "abstract": "Cheap-to-Build Very Large-Language Models (CtB-LLMs) with affordable training\nare emerging as the next big revolution in natural language processing and\nunderstanding. These CtB-LLMs are democratizing access to trainable Very\nLarge-Language Models (VLLMs) and, thus, may represent the building blocks of\nmany NLP systems solving downstream tasks. Hence, a little or a large bias in\nCtB-LLMs may cause huge harm. In this paper, we performed a large investigation\nof the bias of three families of CtB-LLMs, and we showed that debiasing\ntechniques are effective and usable. Indeed, according to current tests, the\nLLaMA and the OPT families have an important bias in gender, race, religion,\nand profession. In contrast to the analysis for other LLMs, we discovered that\nbias depends not on the number of parameters but on the perplexity. Finally,\nthe debiasing of OPT using LoRA reduces bias up to 4.12 points in the\nnormalized stereotype score.", + "authors": "Leonardo Ranaldi, Elena Sofia Ruzzetti, Davide Venditti, Dario Onorati, Fabio Massimo Zanzotto", + "published": "2023-05-23", + "updated": "2023-08-29", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2310.16343v2", + "title": "Evaluating, Understanding, and Improving Constrained Text Generation for Large Language Models", + "abstract": "Advancements in natural language generation (NLG) and large language models\n(LLMs) have led to proficient text generation in various tasks. However,\nintegrating intricate constraints into neural text generation, due to LLMs'\nopacity, remains challenging. This study investigates constrained text\ngeneration for LLMs, where predefined constraints are applied during LLM's\ngeneration process. Our research mainly focuses on mainstream open-source LLMs,\ncategorizing constraints into lexical, structural, and relation-based types. We\nalso present various benchmarks to facilitate fair evaluation. The study\naddresses some key research questions, including evaluating, understanding and\nimproving constrained text generation for LLMs. 
Results illuminate LLMs'\ncapacity and deficiency to incorporate constraints and provide insights for\nfuture developments in constrained text generation. Codes and datasets will be\nreleased upon acceptance.", + "authors": "Xiang Chen, Xiaojun Wan", + "published": "2023-10-25", + "updated": "2024-03-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.18502v1", + "title": "Few-Shot Fairness: Unveiling LLM's Potential for Fairness-Aware Classification", + "abstract": "Employing Large Language Models (LLM) in various downstream applications such\nas classification is crucial, especially for smaller companies lacking the\nexpertise and resources required for fine-tuning a model. Fairness in LLMs\nhelps ensure inclusivity, equal representation based on factors such as race,\ngender and promotes responsible AI deployment. As the use of LLMs has become\nincreasingly prevalent, it is essential to assess whether LLMs can generate\nfair outcomes when subjected to considerations of fairness. In this study, we\nintroduce a framework outlining fairness regulations aligned with various\nfairness definitions, with each definition being modulated by varying degrees\nof abstraction. We explore the configuration for in-context learning and the\nprocedure for selecting in-context demonstrations using RAG, while\nincorporating fairness rules into the process. Experiments conducted with\ndifferent LLMs indicate that GPT-4 delivers superior results in terms of both\naccuracy and fairness compared to other models. This work is one of the early\nattempts to achieve fairness in prediction tasks by utilizing LLMs through\nin-context learning.", + "authors": "Garima Chhikara, Anurag Sharma, Kripabandhu Ghosh, Abhijnan Chakraborty", + "published": "2024-02-28", + "updated": "2024-02-28", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2310.15007v1", + "title": "Did the Neurons Read your Book? Document-level Membership Inference for Large Language Models", + "abstract": "With large language models (LLMs) poised to become embedded in our daily\nlives, questions are starting to be raised about the dataset(s) they learned\nfrom. These questions range from potential bias or misinformation LLMs could\nretain from their training data to questions of copyright and fair use of\nhuman-generated text. However, while these questions emerge, developers of the\nrecent state-of-the-art LLMs become increasingly reluctant to disclose details\non their training corpus. We here introduce the task of document-level\nmembership inference for real-world LLMs, i.e. inferring whether the LLM has\nseen a given document during training or not. First, we propose a procedure for\nthe development and evaluation of document-level membership inference for LLMs\nby leveraging commonly used data sources for training and the model release\ndate. We then propose a practical, black-box method to predict document-level\nmembership and instantiate it on OpenLLaMA-7B with both books and academic\npapers. We show our methodology to perform very well, reaching an impressive\nAUC of 0.856 for books and 0.678 for papers. We then show our approach to\noutperform the sentence-level membership inference attacks used in the privacy\nliterature for the document-level membership task. 
We finally evaluate whether\nsmaller models might be less sensitive to document-level inference and show\nOpenLLaMA-3B to be approximately as sensitive as OpenLLaMA-7B to our approach.\nTaken together, our results show that accurate document-level membership can be\ninferred for LLMs, increasing the transparency of technology poised to change\nour lives.", + "authors": "Matthieu Meeus, Shubham Jain, Marek Rei, Yves-Alexandre de Montjoye", + "published": "2023-10-23", + "updated": "2023-10-23", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.CR", + "cs.LG" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.15215v1", + "title": "Item-side Fairness of Large Language Model-based Recommendation System", + "abstract": "Recommendation systems for Web content distribution intricately connect to\nthe information access and exposure opportunities for vulnerable populations.\nThe emergence of Large Language Models-based Recommendation System (LRS) may\nintroduce additional societal challenges to recommendation systems due to the\ninherent biases in Large Language Models (LLMs). From the perspective of\nitem-side fairness, there remains a lack of comprehensive investigation into\nthe item-side fairness of LRS given the unique characteristics of LRS compared\nto conventional recommendation systems. To bridge this gap, this study examines\nthe property of LRS with respect to item-side fairness and reveals the\ninfluencing factors of both historical users' interactions and inherent\nsemantic biases of LLMs, shedding light on the need to extend conventional\nitem-side fairness methods for LRS. Towards this goal, we develop a concise and\neffective framework called IFairLRS to enhance the item-side fairness of an\nLRS. IFairLRS covers the main stages of building an LRS with specifically\nadapted strategies to calibrate the recommendations of LRS. We utilize IFairLRS\nto fine-tune LLaMA, a representative LLM, on \\textit{MovieLens} and\n\\textit{Steam} datasets, and observe significant item-side fairness\nimprovements. The code can be found in\nhttps://github.com/JiangM-C/IFairLRS.git.", + "authors": "Meng Jiang, Keqin Bao, Jizhi Zhang, Wenjie Wang, Zhengyi Yang, Fuli Feng, Xiangnan He", + "published": "2024-02-23", + "updated": "2024-02-23", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2404.18276v1", + "title": "Bias Neutralization Framework: Measuring Fairness in Large Language Models with Bias Intelligence Quotient (BiQ)", + "abstract": "The burgeoning influence of Large Language Models (LLMs) in shaping public\ndiscourse and decision-making underscores the imperative to address inherent\nbiases within these AI systems. In the wake of AI's expansive integration\nacross sectors, addressing racial bias in LLMs has never been more critical.\nThis paper introduces a novel framework called Comprehensive Bias\nNeutralization Framework (CBNF) which embodies an innovative approach to\nquantifying and mitigating biases within LLMs. Our framework combines the Large\nLanguage Model Bias Index (LLMBI) [Oketunji, A., Anas, M., Saina, D., (2023)]\nand Bias removaL with No Demographics (BLIND) [Orgad, H., Belinkov, Y. 
(2023)]\nmethodologies to create a new metric called Bias Intelligence Quotient\n(BiQ)which detects, measures, and mitigates racial bias in LLMs without\nreliance on demographic annotations.\n By introducing a new metric called BiQ that enhances LLMBI with additional\nfairness metrics, CBNF offers a multi-dimensional metric for bias assessment,\nunderscoring the necessity of a nuanced approach to fairness in AI [Mehrabi et\nal., 2021]. This paper presents a detailed analysis of Latimer AI (a language\nmodel incrementally trained on black history and culture) in comparison to\nChatGPT 3.5, illustrating Latimer AI's efficacy in detecting racial, cultural,\nand gender biases through targeted training and refined bias mitigation\nstrategies [Latimer & Bender, 2023].", + "authors": "Malur Narayan, John Pasmore, Elton Sampaio, Vijay Raghavan, Gabriella Waters", + "published": "2024-04-28", + "updated": "2024-04-28", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "D.1; I.2" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2403.15451v1", + "title": "Towards Enabling FAIR Dataspaces Using Large Language Models", + "abstract": "Dataspaces have recently gained adoption across various sectors, including\ntraditionally less digitized domains such as culture. Leveraging Semantic Web\ntechnologies helps to make dataspaces FAIR, but their complexity poses a\nsignificant challenge to the adoption of dataspaces and increases their cost.\nThe advent of Large Language Models (LLMs) raises the question of how these\nmodels can support the adoption of FAIR dataspaces. In this work, we\ndemonstrate the potential of LLMs in dataspaces with a concrete example. We\nalso derive a research agenda for exploring this emerging field.", + "authors": "Benedikt T. Arnold, Johannes Theissen-Lipp, Diego Collarana, Christoph Lange, Sandra Geisler, Edward Curry, Stefan Decker", + "published": "2024-03-18", + "updated": "2024-03-18", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2311.18580v1", + "title": "FFT: Towards Harmlessness Evaluation and Analysis for LLMs with Factuality, Fairness, Toxicity", + "abstract": "The widespread of generative artificial intelligence has heightened concerns\nabout the potential harms posed by AI-generated texts, primarily stemming from\nfactoid, unfair, and toxic content. Previous researchers have invested much\neffort in assessing the harmlessness of generative language models. However,\nexisting benchmarks are struggling in the era of large language models (LLMs),\ndue to the stronger language generation and instruction following capabilities,\nas well as wider applications. In this paper, we propose FFT, a new benchmark\nwith 2116 elaborated-designed instances, for LLM harmlessness evaluation with\nfactuality, fairness, and toxicity. To investigate the potential harms of LLMs,\nwe evaluate 9 representative LLMs covering various parameter scales, training\nstages, and creators. 
Experiments show that the harmlessness of LLMs is still\nunder-satisfactory, and extensive analysis derives some insightful findings\nthat could inspire future research for harmless LLM research.", + "authors": "Shiyao Cui, Zhenyu Zhang, Yilong Chen, Wenyuan Zhang, Tianyun Liu, Siqi Wang, Tingwen Liu", + "published": "2023-11-30", + "updated": "2023-11-30", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.CR" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2312.15478v1", + "title": "A Group Fairness Lens for Large Language Models", + "abstract": "The rapid advancement of large language models has revolutionized various\napplications but also raised crucial concerns about their potential to\nperpetuate biases and unfairness when deployed in social media contexts.\nEvaluating LLMs' potential biases and fairness has become crucial, as existing\nmethods rely on limited prompts focusing on just a few groups, lacking a\ncomprehensive categorical perspective. In this paper, we propose evaluating LLM\nbiases from a group fairness lens using a novel hierarchical schema\ncharacterizing diverse social groups. Specifically, we construct a dataset,\nGFair, encapsulating target-attribute combinations across multiple dimensions.\nIn addition, we introduce statement organization, a new open-ended text\ngeneration task, to uncover complex biases in LLMs. Extensive evaluations of\npopular LLMs reveal inherent safety concerns. To mitigate the biases of LLM\nfrom a group fairness perspective, we pioneer a novel chain-of-thought method\nGF-Think to mitigate biases of LLMs from a group fairness perspective.\nExperimental results demonstrate its efficacy in mitigating bias in LLMs to\nachieve fairness.", + "authors": "Guanqun Bi, Lei Shen, Yuqiang Xie, Yanan Cao, Tiangang Zhu, Xiaodong He", + "published": "2023-12-24", + "updated": "2023-12-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.17916v2", + "title": "LLM-Resistant Math Word Problem Generation via Adversarial Attacks", + "abstract": "Large language models (LLMs) have significantly transformed the educational\nlandscape. As current plagiarism detection tools struggle to keep pace with\nLLMs' rapid advancements, the educational community faces the challenge of\nassessing students' true problem-solving abilities in the presence of LLMs. In\nthis work, we explore a new paradigm for ensuring fair evaluation -- generating\nadversarial examples which preserve the structure and difficulty of the\noriginal questions aimed for assessment, but are unsolvable by LLMs. Focusing\non the domain of math word problems, we leverage abstract syntax trees to\nstructurally generate adversarial examples that cause LLMs to produce incorrect\nanswers by simply editing the numeric values in the problems. We conduct\nexperiments on various open- and closed-source LLMs, quantitatively and\nqualitatively demonstrating that our method significantly degrades their math\nproblem-solving ability. We identify shared vulnerabilities among LLMs and\npropose a cost-effective approach to attack high-cost models. 
Additionally, we\nconduct automatic analysis on math problems and investigate the cause of\nfailure, offering a nuanced view into model's limitation.", + "authors": "Roy Xie, Chengxuan Huang, Junlin Wang, Bhuwan Dhingra", + "published": "2024-02-27", + "updated": "2024-03-30", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2310.06500v1", + "title": "MetaAgents: Simulating Interactions of Human Behaviors for LLM-based Task-oriented Coordination via Collaborative Generative Agents", + "abstract": "Significant advancements have occurred in the application of Large Language\nModels (LLMs) for various tasks and social simulations. Despite this, their\ncapacities to coordinate within task-oriented social contexts are\nunder-explored. Such capabilities are crucial if LLMs are to effectively mimic\nhuman-like social behavior and produce meaningful results. To bridge this gap,\nwe introduce collaborative generative agents, endowing LLM-based Agents with\nconsistent behavior patterns and task-solving abilities. We situate these\nagents in a simulated job fair environment as a case study to scrutinize their\ncoordination skills. We propose a novel framework that equips collaborative\ngenerative agents with human-like reasoning abilities and specialized skills.\nOur evaluation demonstrates that these agents show promising performance.\nHowever, we also uncover limitations that hinder their effectiveness in more\ncomplex coordination tasks. Our work provides valuable insights into the role\nand evolution of LLMs in task-oriented social simulations.", + "authors": "Yuan Li, Yixuan Zhang, Lichao Sun", + "published": "2023-10-10", + "updated": "2023-10-10", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2305.03514v3", + "title": "Can Large Language Models Transform Computational Social Science?", + "abstract": "Large Language Models (LLMs) are capable of successfully performing many\nlanguage processing tasks zero-shot (without training data). If zero-shot LLMs\ncan also reliably classify and explain social phenomena like persuasiveness and\npolitical ideology, then LLMs could augment the Computational Social Science\n(CSS) pipeline in important ways. This work provides a road map for using LLMs\nas CSS tools. Towards this end, we contribute a set of prompting best practices\nand an extensive evaluation pipeline to measure the zero-shot performance of 13\nlanguage models on 25 representative English CSS benchmarks. On taxonomic\nlabeling tasks (classification), LLMs fail to outperform the best fine-tuned\nmodels but still achieve fair levels of agreement with humans. On free-form\ncoding tasks (generation), LLMs produce explanations that often exceed the\nquality of crowdworkers' gold references. We conclude that the performance of\ntoday's LLMs can augment the CSS research pipeline in two ways: (1) serving as\nzero-shot data annotators on human annotation teams, and (2) bootstrapping\nchallenging creative generation tasks (e.g., explaining the underlying\nattributes of a text). 
In summary, LLMs are poised to meaningfully participate\nin social science analysis in partnership with humans.", + "authors": "Caleb Ziems, William Held, Omar Shaikh, Jiaao Chen, Zhehao Zhang, Diyi Yang", + "published": "2023-04-12", + "updated": "2024-02-26", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2305.11595v3", + "title": "Examining Inter-Consistency of Large Language Models Collaboration: An In-depth Analysis via Debate", + "abstract": "Large Language Models (LLMs) have shown impressive capabilities in various\napplications, but they still face various inconsistency issues. Existing works\nprimarily focus on the inconsistency issues within a single LLM, while we\ncomplementarily explore the inter-consistency among multiple LLMs for\ncollaboration. To examine whether LLMs can collaborate effectively to achieve a\nconsensus for a shared goal, we focus on commonsense reasoning, and introduce a\nformal debate framework (FORD) to conduct a three-stage debate among LLMs with\nreal-world scenarios alignment: fair debate, mismatched debate, and roundtable\ndebate. Through extensive experiments on various datasets, LLMs can effectively\ncollaborate to reach a consensus despite noticeable inter-inconsistencies, but\nimbalances in their abilities can lead to domination by superior LLMs.\nLeveraging a more advanced LLM like GPT-4 as an authoritative judge can boost\ncollaboration performance. Our work contributes to understanding the\ninter-consistency among LLMs and lays the foundation for developing future\ncollaboration methods. Codes and data are available at\nhttps://github.com/Waste-Wood/FORD", + "authors": "Kai Xiong, Xiao Ding, Yixin Cao, Ting Liu, Bing Qin", + "published": "2023-05-19", + "updated": "2023-10-18", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2310.14607v2", + "title": "Confronting LLMs with Traditional ML: Rethinking the Fairness of Large Language Models in Tabular Classifications", + "abstract": "Recent literature has suggested the potential of using large language models\n(LLMs) to make classifications for tabular tasks. However, LLMs have been shown\nto exhibit harmful social biases that reflect the stereotypes and inequalities\npresent in society. To this end, as well as the widespread use of tabular data\nin many high-stake applications, it is important to explore the following\nquestions: what sources of information do LLMs draw upon when making\nclassifications for tabular tasks; whether and to what extent are LLM\nclassifications for tabular data influenced by social biases and stereotypes;\nand what are the consequential implications for fairness?\n Through a series of experiments, we delve into these questions and show that\nLLMs tend to inherit social biases from their training data which significantly\nimpact their fairness in tabular classification tasks. Furthermore, our\ninvestigations show that in the context of bias mitigation, though in-context\nlearning and finetuning have a moderate effect, the fairness metric gap between\ndifferent subgroups is still larger than that in traditional machine learning\nmodels, such as Random Forest and shallow Neural Networks. This observation\nemphasizes that the social biases are inherent within the LLMs themselves and\ninherited from their pretraining corpus, not only from the downstream task\ndatasets. 
Besides, we demonstrate that label-flipping of in-context examples\ncan significantly reduce biases, further highlighting the presence of inherent\nbias within LLMs.", + "authors": "Yanchen Liu, Srishti Gautam, Jiaqi Ma, Himabindu Lakkaraju", + "published": "2023-10-23", + "updated": "2024-04-02", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2312.07420v1", + "title": "FairSISA: Ensemble Post-Processing to Improve Fairness of Unlearning in LLMs", + "abstract": "Training large language models (LLMs) is a costly endeavour in terms of time\nand computational resources. The large amount of training data used during the\nunsupervised pre-training phase makes it difficult to verify all data and,\nunfortunately, undesirable data may be ingested during training. Re-training\nfrom scratch is impractical and has led to the creation of the 'unlearning'\ndiscipline where models are modified to \"unlearn\" undesirable information\nwithout retraining. However, any modification can alter the behaviour of LLMs,\nespecially on key dimensions such as fairness. This is the first work that\nexamines this interplay between unlearning and fairness for LLMs. In\nparticular, we focus on a popular unlearning framework known as SISA [Bourtoule\net al., 2021], which creates an ensemble of models trained on disjoint shards.\nWe evaluate the performance-fairness trade-off for SISA, and empirically\ndemonstrate that SISA can indeed reduce fairness in LLMs. To remedy this, we\npropose post-processing bias mitigation techniques for ensemble models produced\nby SISA. We adapt the post-processing fairness improvement technique from\n[Hardt et al., 2016] to design three methods that can handle model ensembles,\nand prove that one of the methods is an optimal fair predictor for ensemble of\nmodels. Through experimental results, we demonstrate the efficacy of our\npost-processing framework called 'FairSISA'.", + "authors": "Swanand Ravindra Kadhe, Anisa Halimi, Ambrish Rawat, Nathalie Baracaldo", + "published": "2023-12-12", + "updated": "2023-12-12", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CY" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2311.03033v1", + "title": "Beyond Words: A Mathematical Framework for Interpreting Large Language Models", + "abstract": "Large language models (LLMs) are powerful AI tools that can generate and\ncomprehend natural language text and other complex information. However, the\nfield lacks a mathematical framework to systematically describe, compare and\nimprove LLMs. We propose Hex a framework that clarifies key terms and concepts\nin LLM research, such as hallucinations, alignment, self-verification and\nchain-of-thought reasoning. The Hex framework offers a precise and consistent\nway to characterize LLMs, identify their strengths and weaknesses, and\nintegrate new findings. Using Hex, we differentiate chain-of-thought reasoning\nfrom chain-of-thought prompting and establish the conditions under which they\nare equivalent. This distinction clarifies the basic assumptions behind\nchain-of-thought prompting and its implications for methods that use it, such\nas self-verification and prompt programming.\n Our goal is to provide a formal framework for LLMs that can help both\nresearchers and practitioners explore new possibilities for generative AI. We\ndo not claim to have a definitive solution, but rather a tool for opening up\nnew research avenues. 
We argue that our formal definitions and results are\ncrucial for advancing the discussion on how to build generative AI systems that\nare safe, reliable, fair and robust, especially in domains like healthcare and\nsoftware engineering.", + "authors": "Javier Gonz\u00e1lez, Aditya V. Nori", + "published": "2023-11-06", + "updated": "2023-11-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2310.13343v1", + "title": "Challenges and Contributing Factors in the Utilization of Large Language Models (LLMs)", + "abstract": "With the development of large language models (LLMs) like the GPT series,\ntheir widespread use across various application scenarios presents a myriad of\nchallenges. This review initially explores the issue of domain specificity,\nwhere LLMs may struggle to provide precise answers to specialized questions\nwithin niche fields. The problem of knowledge forgetting arises as these LLMs\nmight find it hard to balance old and new information. The knowledge repetition\nphenomenon reveals that sometimes LLMs might deliver overly mechanized\nresponses, lacking depth and originality. Furthermore, knowledge illusion\ndescribes situations where LLMs might provide answers that seem insightful but\nare actually superficial, while knowledge toxicity focuses on harmful or biased\ninformation outputs. These challenges underscore problems in the training data\nand algorithmic design of LLMs. To address these issues, it's suggested to\ndiversify training data, fine-tune models, enhance transparency and\ninterpretability, and incorporate ethics and fairness training. Future\ntechnological trends might lean towards iterative methodologies, multimodal\nlearning, model personalization and customization, and real-time learning and\nfeedback mechanisms. In conclusion, future LLMs should prioritize fairness,\ntransparency, and ethics, ensuring they uphold high moral and ethical standards\nwhen serving humanity.", + "authors": "Xiaoliang Chen, Liangbin Li, Le Chang, Yunhe Huang, Yuxuan Zhao, Yuxiao Zhang, Dinuo Li", + "published": "2023-10-20", + "updated": "2023-10-20", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2307.03838v2", + "title": "RADAR: Robust AI-Text Detection via Adversarial Learning", + "abstract": "Recent advances in large language models (LLMs) and the intensifying\npopularity of ChatGPT-like applications have blurred the boundary of\nhigh-quality text generation between humans and machines. However, in addition\nto the anticipated revolutionary changes to our technology and society, the\ndifficulty of distinguishing LLM-generated texts (AI-text) from human-generated\ntexts poses new challenges of misuse and fairness, such as fake content\ngeneration, plagiarism, and false accusations of innocent writers. While\nexisting works show that current AI-text detectors are not robust to LLM-based\nparaphrasing, this paper aims to bridge this gap by proposing a new framework\ncalled RADAR, which jointly trains a robust AI-text detector via adversarial\nlearning. RADAR is based on adversarial training of a paraphraser and a\ndetector. The paraphraser's goal is to generate realistic content to evade\nAI-text detection. RADAR uses the feedback from the detector to update the\nparaphraser, and vice versa. 
Evaluated with 8 different LLMs (Pythia, Dolly\n2.0, Palmyra, Camel, GPT-J, Dolly 1.0, LLaMA, and Vicuna) across 4 datasets,\nexperimental results show that RADAR significantly outperforms existing AI-text\ndetection methods, especially when paraphrasing is in place. We also identify\nthe strong transferability of RADAR from instruction-tuned LLMs to other LLMs,\nand evaluate the improved capability of RADAR via GPT-3.5-Turbo.", + "authors": "Xiaomeng Hu, Pin-Yu Chen, Tsung-Yi Ho", + "published": "2023-07-07", + "updated": "2023-10-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2404.10199v3", + "title": "CULTURE-GEN: Revealing Global Cultural Perception in Language Models through Natural Language Prompting", + "abstract": "As the utilization of large language models (LLMs) has proliferated\nworldwide, it is crucial for them to have adequate knowledge and fair\nrepresentation for diverse global cultures. In this work, we uncover culture\nperceptions of three SOTA models on 110 countries and regions on 8\nculture-related topics through culture-conditioned generations, and extract\nsymbols from these generations that are associated to each culture by the LLM.\nWe discover that culture-conditioned generation consist of linguistic \"markers\"\nthat distinguish marginalized cultures apart from default cultures. We also\ndiscover that LLMs have an uneven degree of diversity in the culture symbols,\nand that cultures from different geographic regions have different presence in\nLLMs' culture-agnostic generation. Our findings promote further research in\nstudying the knowledge and fairness of global culture perception in LLMs. Code\nand Data can be found in: https://github.com/huihanlhh/Culture-Gen/", + "authors": "Huihan Li, Liwei Jiang, Jena D. Huang, Hyunwoo Kim, Sebastin Santy, Taylor Sorensen, Bill Yuchen Lin, Nouha Dziri, Xiang Ren, Yejin Choi", + "published": "2024-04-16", + "updated": "2024-04-26", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2307.15997v1", + "title": "RoCar: A Relationship Network-based Evaluation Method to Large Language Models", + "abstract": "Large language models (LLMs) have received increasing attention. However, due\nto the complexity of its capabilities, how to rationally evaluate the\ncapabilities of LLMs is still a task to be solved. We propose the RoCar method,\nwhich utilizes the defined basic schemas to randomly construct a task graph and\ngenerates natural language evaluation tasks based on the task graph to evaluate\nthe reasoning and memory abilities of LLMs respectively. 
Due to the very large\nrandomness of the task construction process, it is possible to ensure that none\nof the LLMs to be tested has directly learned the evaluation tasks,\nguaranteeing the fairness of the evaluation method.", + "authors": "Ming Wang, Wenfang Wu, Chongyun Gao, Daling Wang, Shi Feng, Yifei Zhang", + "published": "2023-07-29", + "updated": "2023-07-29", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2401.01262v2", + "title": "Fairness Certification for Natural Language Processing and Large Language Models", + "abstract": "Natural Language Processing (NLP) plays an important role in our daily lives,\nparticularly due to the enormous progress of Large Language Models (LLM).\nHowever, NLP has many fairness-critical use cases, e.g., as an expert system in\nrecruitment or as an LLM-based tutor in education. Since NLP is based on human\nlanguage, potentially harmful biases can diffuse into NLP systems and produce\nunfair results, discriminate against minorities or generate legal issues.\nHence, it is important to develop a fairness certification for NLP approaches.\nWe follow a qualitative research approach towards a fairness certification for\nNLP. In particular, we have reviewed a large body of literature on algorithmic\nfairness, and we have conducted semi-structured expert interviews with a wide\nrange of experts from that area. We have systematically devised six fairness\ncriteria for NLP, which can be further refined into 18 sub-categories. Our\ncriteria offer a foundation for operationalizing and testing processes to\ncertify fairness, both from the perspective of the auditor and the audited\norganization.", + "authors": "Vincent Freiberger, Erik Buchmann", + "published": "2024-01-02", + "updated": "2024-01-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.CY", + "cs.LG", + "68T50", + "I.2.7" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2403.00811v1", + "title": "Cognitive Bias in High-Stakes Decision-Making with LLMs", + "abstract": "Large language models (LLMs) offer significant potential as tools to support\nan expanding range of decision-making tasks. However, given their training on\nhuman (created) data, LLMs can inherit both societal biases against protected\ngroups, as well as be subject to cognitive bias. Such human-like bias can\nimpede fair and explainable decisions made with LLM assistance. Our work\nintroduces BiasBuster, a framework designed to uncover, evaluate, and mitigate\ncognitive bias in LLMs, particularly in high-stakes decision-making tasks.\nInspired by prior research in psychology and cognitive sciences, we develop a\ndataset containing 16,800 prompts to evaluate different cognitive biases (e.g.,\nprompt-induced, sequential, inherent). We test various bias mitigation\nstrategies, amidst proposing a novel method using LLMs to debias their own\nprompts. 
Our analysis provides a comprehensive picture on the presence and\neffects of cognitive bias across different commercial and open-source models.\nWe demonstrate that our self-help debiasing effectively mitigate cognitive bias\nwithout having to manually craft examples for each bias type.", + "authors": "Jessica Echterhoff, Yao Liu, Abeer Alessa, Julian McAuley, Zexue He", + "published": "2024-02-25", + "updated": "2024-02-25", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2311.09447v2", + "title": "How Trustworthy are Open-Source LLMs? An Assessment under Malicious Demonstrations Shows their Vulnerabilities", + "abstract": "The rapid progress in open-source Large Language Models (LLMs) is\nsignificantly driving AI development forward. However, there is still a limited\nunderstanding of their trustworthiness. Deploying these models at scale without\nsufficient trustworthiness can pose significant risks, highlighting the need to\nuncover these issues promptly. In this work, we conduct an adversarial\nassessment of open-source LLMs on trustworthiness, scrutinizing them across\neight different aspects including toxicity, stereotypes, ethics, hallucination,\nfairness, sycophancy, privacy, and robustness against adversarial\ndemonstrations. We propose advCoU, an extended Chain of Utterances-based (CoU)\nprompting strategy by incorporating carefully crafted malicious demonstrations\nfor trustworthiness attack. Our extensive experiments encompass recent and\nrepresentative series of open-source LLMs, including Vicuna, MPT, Falcon,\nMistral, and Llama 2. The empirical outcomes underscore the efficacy of our\nattack strategy across diverse aspects. More interestingly, our result analysis\nreveals that models with superior performance in general NLP tasks do not\nalways have greater trustworthiness; in fact, larger models can be more\nvulnerable to attacks. Additionally, models that have undergone instruction\ntuning, focusing on instruction following, tend to be more susceptible,\nalthough fine-tuning LLMs for safety alignment proves effective in mitigating\nadversarial trustworthiness attacks.", + "authors": "Lingbo Mo, Boshi Wang, Muhao Chen, Huan Sun", + "published": "2023-11-15", + "updated": "2024-04-02", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2404.02650v1", + "title": "Towards detecting unanticipated bias in Large Language Models", + "abstract": "Over the last year, Large Language Models (LLMs) like ChatGPT have become\nwidely available and have exhibited fairness issues similar to those in\nprevious machine learning systems. Current research is primarily focused on\nanalyzing and quantifying these biases in training data and their impact on the\ndecisions of these models, alongside developing mitigation strategies. This\nresearch largely targets well-known biases related to gender, race, ethnicity,\nand language. However, it is clear that LLMs are also affected by other, less\nobvious implicit biases. The complex and often opaque nature of these models\nmakes detecting such biases challenging, yet this is crucial due to their\npotential negative impact in various applications. In this paper, we explore\nnew avenues for detecting these unanticipated biases in LLMs, focusing\nspecifically on Uncertainty Quantification and Explainable AI methods. 
These\napproaches aim to assess the certainty of model decisions and to make the\ninternal decision-making processes of LLMs more transparent, thereby\nidentifying and understanding biases that are not immediately apparent. Through\nthis research, we aim to contribute to the development of fairer and more\ntransparent AI systems.", + "authors": "Anna Kruspe", + "published": "2024-04-03", + "updated": "2024-04-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2405.01769v1", + "title": "A Survey on Large Language Models for Critical Societal Domains: Finance, Healthcare, and Law", + "abstract": "In the fast-evolving domain of artificial intelligence, large language models\n(LLMs) such as GPT-3 and GPT-4 are revolutionizing the landscapes of finance,\nhealthcare, and law: domains characterized by their reliance on professional\nexpertise, challenging data acquisition, high-stakes, and stringent regulatory\ncompliance. This survey offers a detailed exploration of the methodologies,\napplications, challenges, and forward-looking opportunities of LLMs within\nthese high-stakes sectors. We highlight the instrumental role of LLMs in\nenhancing diagnostic and treatment methodologies in healthcare, innovating\nfinancial analytics, and refining legal interpretation and compliance\nstrategies. Moreover, we critically examine the ethics for LLM applications in\nthese fields, pointing out the existing ethical concerns and the need for\ntransparent, fair, and robust AI systems that respect regulatory norms. By\npresenting a thorough review of current literature and practical applications,\nwe showcase the transformative impact of LLMs, and outline the imperative for\ninterdisciplinary cooperation, methodological advancements, and ethical\nvigilance. Through this lens, we aim to spark dialogue and inspire future\nresearch dedicated to maximizing the benefits of LLMs while mitigating their\nrisks in these precision-dependent sectors. To facilitate future research on\nLLMs in these critical societal domains, we also initiate a reading list that\ntracks the latest advancements under this topic, which will be continually\nupdated: \\url{https://github.com/czyssrs/LLM_X_papers}.", + "authors": "Zhiyu Zoey Chen, Jing Ma, Xinlu Zhang, Nan Hao, An Yan, Armineh Nourbakhsh, Xianjun Yang, Julian McAuley, Linda Petzold, William Yang Wang", + "published": "2024-05-02", + "updated": "2024-05-02", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.02680v1", + "title": "Large Language Models are Geographically Biased", + "abstract": "Large Language Models (LLMs) inherently carry the biases contained in their\ntraining corpora, which can lead to the perpetuation of societal harm. As the\nimpact of these foundation models grows, understanding and evaluating their\nbiases becomes crucial to achieving fairness and accuracy. We propose to study\nwhat LLMs know about the world we live in through the lens of geography. This\napproach is particularly powerful as there is ground truth for the numerous\naspects of human life that are meaningfully projected onto geographic space\nsuch as culture, race, language, politics, and religion. We show various\nproblematic geographic biases, which we define as systemic errors in geospatial\npredictions. 
Initially, we demonstrate that LLMs are capable of making accurate\nzero-shot geospatial predictions in the form of ratings that show strong\nmonotonic correlation with ground truth (Spearman's $\\rho$ of up to 0.89). We\nthen show that LLMs exhibit common biases across a range of objective and\nsubjective topics. In particular, LLMs are clearly biased against locations\nwith lower socioeconomic conditions (e.g. most of Africa) on a variety of\nsensitive subjective topics such as attractiveness, morality, and intelligence\n(Spearman's $\\rho$ of up to 0.70). Finally, we introduce a bias score to\nquantify this and find that there is significant variation in the magnitude of\nbias across existing LLMs.", + "authors": "Rohin Manvi, Samar Khanna, Marshall Burke, David Lobell, Stefano Ermon", + "published": "2024-02-05", + "updated": "2024-02-05", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.CY", + "cs.LG" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2311.13095v1", + "title": "Enhancing Logical Reasoning in Large Language Models to Facilitate Legal Applications", + "abstract": "Language serves as a vehicle for conveying thought, enabling communication\namong individuals. The ability to distinguish between diverse concepts,\nidentify fairness and injustice, and comprehend a range of legal notions\nfundamentally relies on logical reasoning. Large Language Models (LLMs) attempt\nto emulate human language understanding and generation, but their competency in\nlogical reasoning remains limited. This paper seeks to address the\nphilosophical question: How can we effectively teach logical reasoning to LLMs\nwhile maintaining a deep understanding of the intricate relationship between\nlanguage and logic? By focusing on bolstering LLMs' capabilities in logical\nreasoning, we aim to expand their applicability in law and other\nlogic-intensive disciplines. To this end, we propose a Reinforcement Learning\nfrom Logical Feedback (RLLF) approach, which serves as a potential framework\nfor refining LLMs' reasoning capacities. Through RLLF and a revised evaluation\nmethodology, we explore new avenues for research in this domain and contribute\nto the development of LLMs capable of handling complex legal reasoning tasks\nwhile acknowledging the fundamental connection between language and logic.", + "authors": "Ha-Thanh Nguyen, Wachara Fungwacharakorn, Ken Satoh", + "published": "2023-11-22", + "updated": "2023-11-22", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2403.15491v1", + "title": "Open Source Conversational LLMs do not know most Spanish words", + "abstract": "The growing interest in Large Language Models (LLMs) and in particular in\nconversational models with which users can interact has led to the development\nof a large number of open-source chat LLMs. These models are evaluated on a\nwide range of benchmarks to assess their capabilities in answering questions or\nsolving problems on almost any possible topic or to test their ability to\nreason or interpret texts. Instead, the evaluation of the knowledge that these\nmodels have of the languages has received much less attention. For example, the\nwords that they can recognize and use in different languages. In this paper, we\nevaluate the knowledge that open-source chat LLMs have of Spanish words by\ntesting a sample of words in a reference dictionary. 
The results show that\nopen-source chat LLMs produce incorrect meanings for an important fraction of\nthe words and are not able to use most of the words correctly to write\nsentences with context. These results show how Spanish is left behind in the\nopen-source LLM race and highlight the need to push for linguistic fairness in\nconversational LLMs ensuring that they provide similar performance across\nlanguages.", + "authors": "Javier Conde, Miguel Gonz\u00e1lez, Nina Melero, Raquel Ferrando, Gonzalo Mart\u00ednez, Elena Merino-G\u00f3mez, Jos\u00e9 Alberto Hern\u00e1ndez, Pedro Reviriego", + "published": "2024-03-21", + "updated": "2024-03-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.07688v1", + "title": "CyberMetric: A Benchmark Dataset for Evaluating Large Language Models Knowledge in Cybersecurity", + "abstract": "Large Language Models (LLMs) excel across various domains, from computer\nvision to medical diagnostics. However, understanding the diverse landscape of\ncybersecurity, encompassing cryptography, reverse engineering, and managerial\nfacets like risk assessment, presents a challenge, even for human experts. In\nthis paper, we introduce CyberMetric, a benchmark dataset comprising 10,000\nquestions sourced from standards, certifications, research papers, books, and\nother publications in the cybersecurity domain. The questions are created\nthrough a collaborative process, i.e., merging expert knowledge with LLMs,\nincluding GPT-3.5 and Falcon-180B. Human experts spent over 200 hours verifying\ntheir accuracy and relevance. Beyond assessing LLMs' knowledge, the dataset's\nmain goal is to facilitate a fair comparison between humans and different LLMs\nin cybersecurity. To achieve this, we carefully selected 80 questions covering\na wide range of topics within cybersecurity and involved 30 participants of\ndiverse expertise levels, facilitating a comprehensive comparison between human\nand machine intelligence in this area. The findings revealed that LLMs\noutperformed humans in almost every aspect of cybersecurity.", + "authors": "Norbert Tihanyi, Mohamed Amine Ferrag, Ridhi Jain, Merouane Debbah", + "published": "2024-02-12", + "updated": "2024-02-12", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CR" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2401.11033v4", + "title": "FAIR Enough: How Can We Develop and Assess a FAIR-Compliant Dataset for Large Language Models' Training?", + "abstract": "The rapid evolution of Large Language Models (LLMs) highlights the necessity\nfor ethical considerations and data integrity in AI development, particularly\nemphasizing the role of FAIR (Findable, Accessible, Interoperable, Reusable)\ndata principles. While these principles are crucial for ethical data\nstewardship, their specific application in the context of LLM training data\nremains an under-explored area. This research gap is the focus of our study,\nwhich begins with an examination of existing literature to underline the\nimportance of FAIR principles in managing data for LLM training. Building upon\nthis, we propose a novel framework designed to integrate FAIR principles into\nthe LLM development lifecycle. A contribution of our work is the development of\na comprehensive checklist intended to guide researchers and developers in\napplying FAIR data principles consistently across the model development\nprocess. 
The utility and effectiveness of our framework are validated through a\ncase study on creating a FAIR-compliant dataset aimed at detecting and\nmitigating biases in LLMs. We present this framework to the community as a tool\nto foster the creation of technologically advanced, ethically grounded, and\nsocially responsible AI models.", + "authors": "Shaina Raza, Shardul Ghuge, Chen Ding, Elham Dolatabadi, Deval Pandya", + "published": "2024-01-19", + "updated": "2024-04-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2307.11761v1", + "title": "Fairness of ChatGPT and the Role Of Explainable-Guided Prompts", + "abstract": "Our research investigates the potential of Large-scale Language Models\n(LLMs), specifically OpenAI's GPT, in credit risk assessment-a binary\nclassification task. Our findings suggest that LLMs, when directed by\njudiciously designed prompts and supplemented with domain-specific knowledge,\ncan parallel the performance of traditional Machine Learning (ML) models.\nIntriguingly, they achieve this with significantly less data-40 times less,\nutilizing merely 20 data points compared to the ML's 800. LLMs particularly\nexcel in minimizing false positives and enhancing fairness, both being vital\naspects of risk analysis. While our results did not surpass those of classical\nML models, they underscore the potential of LLMs in analogous tasks, laying a\ngroundwork for future explorations into harnessing the capabilities of LLMs in\ndiverse ML tasks.", + "authors": "Yashar Deldjoo", + "published": "2023-07-14", + "updated": "2023-07-14", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2308.11483v1", + "title": "Large Language Models Sensitivity to The Order of Options in Multiple-Choice Questions", + "abstract": "Large Language Models (LLMs) have demonstrated remarkable capabilities in\nvarious NLP tasks. However, previous works have shown these models are\nsensitive towards prompt wording, and few-shot demonstrations and their order,\nposing challenges to fair assessment of these models. As these models become\nmore powerful, it becomes imperative to understand and address these\nlimitations. In this paper, we focus on LLMs robustness on the task of\nmultiple-choice questions -- commonly adopted task to study reasoning and\nfact-retrieving capability of LLMs. Investigating the sensitivity of LLMs\ntowards the order of options in multiple-choice questions, we demonstrate a\nconsiderable performance gap of approximately 13% to 75% in LLMs on different\nbenchmarks, when answer options are reordered, even when using demonstrations\nin a few-shot setting. Through a detailed analysis, we conjecture that this\nsensitivity arises when LLMs are uncertain about the prediction between the\ntop-2/3 choices, and specific options placements may favor certain prediction\nbetween those top choices depending on the question caused by positional bias.\nWe also identify patterns in top-2 choices that amplify or mitigate the model's\nbias toward option placement. We found that for amplifying bias, the optimal\nstrategy involves positioning the top two choices as the first and last\noptions. Conversely, to mitigate bias, we recommend placing these choices among\nthe adjacent options. 
To validate our conjecture, we conduct various\nexperiments and adopt two approaches to calibrate LLMs' predictions, leading to\nup to 8 percentage points improvement across different models and benchmarks.", + "authors": "Pouya Pezeshkpour, Estevam Hruschka", + "published": "2023-08-22", + "updated": "2023-08-22", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2309.08836v2", + "title": "Bias and Fairness in Chatbots: An Overview", + "abstract": "Chatbots have been studied for more than half a century. With the rapid\ndevelopment of natural language processing (NLP) technologies in recent years,\nchatbots using large language models (LLMs) have received much attention\nnowadays. Compared with traditional ones, modern chatbots are more powerful and\nhave been used in real-world applications. There are however, bias and fairness\nconcerns in modern chatbot design. Due to the huge amounts of training data,\nextremely large model sizes, and lack of interpretability, bias mitigation and\nfairness preservation of modern chatbots are challenging. Thus, a comprehensive\noverview on bias and fairness in chatbot systems is given in this paper. The\nhistory of chatbots and their categories are first reviewed. Then, bias sources\nand potential harms in applications are analyzed. Considerations in designing\nfair and unbiased chatbot systems are examined. Finally, future research\ndirections are discussed.", + "authors": "Jintang Xue, Yun-Cheng Wang, Chengwei Wei, Xiaofeng Liu, Jonghye Woo, C. -C. Jay Kuo", + "published": "2023-09-16", + "updated": "2023-12-10", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.CY" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.08189v1", + "title": "Simulating Human Strategic Behavior: Comparing Single and Multi-agent LLMs", + "abstract": "When creating plans, policies, or applications for people, it is challenging\nfor designers to think through the strategic ways that different people will\nbehave. Recently, Large Language Models (LLMs) have been shown to create\nrealistic simulations of human-like behavior based on personas. We build on\nthis to investigate whether LLMs can simulate human strategic behavior. Human\nstrategies are complex because they take into account social norms in addition\nto aiming to maximize personal gain. The ultimatum game is a classic economics\nexperiment used to understand human strategic behavior in a social setting. It\nshows that people will often choose to \"punish\" other players to enforce social\nnorms rather than to maximize personal profits. We test whether LLMs can\nreplicate this complex behavior in simulations. We compare two architectures:\nsingle- and multi-agent LLMs. We compare their abilities to (1) simulate\nhuman-like actions in the ultimatum game, (2) simulate two player\npersonalities, greedy and fair, and (3) create robust strategies that are\nlogically complete and consistent with personality. Our evaluation shows the\nmulti-agent architecture is much more accurate than single LLMs (88% vs. 50%)\nin simulating human strategy creation and actions for personality pairs. 
Thus\nthere is potential to use LLMs to simulate human strategic behavior to help\ndesigners, planners, and policymakers perform preliminary exploration of how\npeople behave in systems.", + "authors": "Karthik Sreedhar, Lydia Chilton", + "published": "2024-02-13", + "updated": "2024-02-13", + "primary_cat": "cs.HC", + "cats": [ + "cs.HC" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2404.06003v1", + "title": "FreeEval: A Modular Framework for Trustworthy and Efficient Evaluation of Large Language Models", + "abstract": "The rapid development of large language model (LLM) evaluation methodologies\nand datasets has led to a profound challenge: integrating state-of-the-art\nevaluation techniques cost-effectively while ensuring reliability,\nreproducibility, and efficiency. Currently, there is a notable absence of a\nunified and adaptable framework that seamlessly integrates various evaluation\napproaches. Moreover, the reliability of evaluation findings is often\nquestionable due to potential data contamination, with the evaluation\nefficiency commonly overlooked when facing the substantial costs associated\nwith LLM inference. In response to these challenges, we introduce FreeEval, a\nmodular and scalable framework crafted to enable trustworthy and efficient\nautomatic evaluations of LLMs. Firstly, FreeEval's unified abstractions\nsimplify the integration and improve the transparency of diverse evaluation\nmethodologies, encompassing dynamic evaluation that demand sophisticated LLM\ninteractions. Secondly, the framework integrates meta-evaluation techniques\nlike human evaluation and data contamination detection, which, along with\ndynamic evaluation modules in the platform, enhance the fairness of the\nevaluation outcomes. Lastly, FreeEval is designed with a high-performance\ninfrastructure, including distributed computation and caching strategies,\nenabling extensive evaluations across multi-node, multi-GPU clusters for\nopen-source and proprietary LLMs.", + "authors": "Zhuohao Yu, Chang Gao, Wenjin Yao, Yidong Wang, Zhengran Zeng, Wei Ye, Jindong Wang, Yue Zhang, Shikun Zhang", + "published": "2024-04-09", + "updated": "2024-04-09", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2311.08472v1", + "title": "Selecting Shots for Demographic Fairness in Few-Shot Learning with Large Language Models", + "abstract": "Recently, work in NLP has shifted to few-shot (in-context) learning, with\nlarge language models (LLMs) performing well across a range of tasks. However,\nwhile fairness evaluations have become a standard for supervised methods,\nlittle is known about the fairness of LLMs as prediction systems. Further,\ncommon standard methods for fairness involve access to models weights or are\napplied during finetuning, which are not applicable in few-shot learning. Do\nLLMs exhibit prediction biases when used for standard NLP tasks? In this work,\nwe explore the effect of shots, which directly affect the performance of\nmodels, on the fairness of LLMs as NLP classification systems. We consider how\ndifferent shot selection strategies, both existing and new demographically\nsensitive methods, affect model fairness across three standard fairness\ndatasets. 
We discuss how future work can include LLM fairness evaluations.", + "authors": "Carlos Aguirre, Kuleen Sasse, Isabel Cachola, Mark Dredze", + "published": "2023-11-14", + "updated": "2023-11-14", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2303.01248v3", + "title": "Can ChatGPT Assess Human Personalities? A General Evaluation Framework", + "abstract": "Large Language Models (LLMs) especially ChatGPT have produced impressive\nresults in various areas, but their potential human-like psychology is still\nlargely unexplored. Existing works study the virtual personalities of LLMs but\nrarely explore the possibility of analyzing human personalities via LLMs. This\npaper presents a generic evaluation framework for LLMs to assess human\npersonalities based on Myers Briggs Type Indicator (MBTI) tests. Specifically,\nwe first devise unbiased prompts by randomly permuting options in MBTI\nquestions and adopt the average testing result to encourage more impartial\nanswer generation. Then, we propose to replace the subject in question\nstatements to enable flexible queries and assessments on different subjects\nfrom LLMs. Finally, we re-formulate the question instructions in a manner of\ncorrectness evaluation to facilitate LLMs to generate clearer responses. The\nproposed framework enables LLMs to flexibly assess personalities of different\ngroups of people. We further propose three evaluation metrics to measure the\nconsistency, robustness, and fairness of assessment results from\nstate-of-the-art LLMs including ChatGPT and GPT-4. Our experiments reveal\nChatGPT's ability to assess human personalities, and the average results\ndemonstrate that it can achieve more consistent and fairer assessments in spite\nof lower robustness against prompt biases compared with InstructGPT.", + "authors": "Haocong Rao, Cyril Leung, Chunyan Miao", + "published": "2023-03-01", + "updated": "2023-10-13", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2308.10397v2", + "title": "FairMonitor: A Four-Stage Automatic Framework for Detecting Stereotypes and Biases in Large Language Models", + "abstract": "Detecting stereotypes and biases in Large Language Models (LLMs) can enhance\nfairness and reduce adverse impacts on individuals or groups when these LLMs\nare applied. However, the majority of existing methods focus on measuring the\nmodel's preference towards sentences containing biases and stereotypes within\ndatasets, which lacks interpretability and cannot detect implicit biases and\nstereotypes in the real world. To address this gap, this paper introduces a\nfour-stage framework to directly evaluate stereotypes and biases in the\ngenerated content of LLMs, including direct inquiry testing, serial or adapted\nstory testing, implicit association testing, and unknown situation testing.\nAdditionally, the paper proposes multi-dimensional evaluation metrics and\nexplainable zero-shot prompts for automated evaluation. Using the education\nsector as a case study, we constructed the Edu-FairMonitor based on the\nfour-stage framework, which encompasses 12,632 open-ended questions covering\nnine sensitive factors and 26 educational scenarios. Experimental results\nreveal varying degrees of stereotypes and biases in five LLMs evaluated on\nEdu-FairMonitor. 
Moreover, the results of our proposed automated evaluation\nmethod have shown a high correlation with human annotations.", + "authors": "Yanhong Bai, Jiabao Zhao, Jinxin Shi, Tingjiang Wei, Xingjiao Wu, Liang He", + "published": "2023-08-21", + "updated": "2023-10-27", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2401.08495v2", + "title": "Large Language Models Portray Socially Subordinate Groups as More Homogeneous, Consistent with a Bias Observed in Humans", + "abstract": "Large language models (LLMs) are becoming pervasive in everyday life, yet\ntheir propensity to reproduce biases inherited from training data remains a\npressing concern. Prior investigations into bias in LLMs have focused on the\nassociation of social groups with stereotypical attributes. However, this is\nonly one form of human bias such systems may reproduce. We investigate a new\nform of bias in LLMs that resembles a social psychological phenomenon where\nsocially subordinate groups are perceived as more homogeneous than socially\ndominant groups. We had ChatGPT, a state-of-the-art LLM, generate texts about\nintersectional group identities and compared those texts on measures of\nhomogeneity. We consistently found that ChatGPT portrayed African, Asian, and\nHispanic Americans as more homogeneous than White Americans, indicating that\nthe model described racial minority groups with a narrower range of human\nexperience. ChatGPT also portrayed women as more homogeneous than men, but\nthese differences were small. Finally, we found that the effect of gender\ndiffered across racial/ethnic groups such that the effect of gender was\nconsistent within African and Hispanic Americans but not within Asian and White\nAmericans. We argue that the tendency of LLMs to describe groups as less\ndiverse risks perpetuating stereotypes and discriminatory behavior.", + "authors": "Messi H. J. Lee, Jacob M. Montgomery, Calvin K. Lai", + "published": "2024-01-16", + "updated": "2024-04-26", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2312.15398v1", + "title": "Fairness-Aware Structured Pruning in Transformers", + "abstract": "The increasing size of large language models (LLMs) has introduced challenges\nin their training and inference. Removing model components is perceived as a\nsolution to tackle the large model sizes, however, existing pruning methods\nsolely focus on performance, without considering an essential aspect for the\nresponsible use of LLMs: model fairness. It is crucial to address the fairness\nof LLMs towards diverse groups, such as women, Black people, LGBTQ+, Jewish\ncommunities, among others, as they are being deployed and available to a wide\naudience. In this work, first, we investigate how attention heads impact\nfairness and performance in pre-trained transformer-based language models. We\nthen propose a novel method to prune the attention heads that negatively impact\nfairness while retaining the heads critical for performance, i.e. language\nmodeling capabilities. Our approach is practical in terms of time and\nresources, as it does not require fine-tuning the final pruned, and fairer,\nmodel. 
Our findings demonstrate a reduction in gender bias by 19%, 19.5%,\n39.5%, 34.7%, 23%, and 8% for DistilGPT-2, GPT-2, GPT-Neo of two different\nsizes, GPT-J, and Llama 2 models, respectively, in comparison to the biased\nmodel, with only a slight decrease in performance.", + "authors": "Abdelrahman Zayed, Goncalo Mordido, Samira Shabanian, Ioana Baldini, Sarath Chandar", + "published": "2023-12-24", + "updated": "2023-12-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.CY", + "cs.LG" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2404.03192v1", + "title": "Do Large Language Models Rank Fairly? An Empirical Study on the Fairness of LLMs as Rankers", + "abstract": "The integration of Large Language Models (LLMs) in information retrieval has\nraised a critical reevaluation of fairness in the text-ranking models. LLMs,\nsuch as GPT models and Llama2, have shown effectiveness in natural language\nunderstanding tasks, and prior works (e.g., RankGPT) have also demonstrated\nthat the LLMs exhibit better performance than the traditional ranking models in\nthe ranking task. However, their fairness remains largely unexplored. This\npaper presents an empirical study evaluating these LLMs using the TREC Fair\nRanking dataset, focusing on the representation of binary protected attributes\nsuch as gender and geographic location, which are historically underrepresented\nin search outcomes. Our analysis delves into how these LLMs handle queries and\ndocuments related to these attributes, aiming to uncover biases in their\nranking algorithms. We assess fairness from both user and content perspectives,\ncontributing an empirical benchmark for evaluating LLMs as the fair ranker.", + "authors": "Yuan Wang, Xuyang Wu, Hsin-Tai Wu, Zhiqiang Tao, Yi Fang", + "published": "2024-04-04", + "updated": "2024-04-04", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR", + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2311.00306v1", + "title": "Probing Explicit and Implicit Gender Bias through LLM Conditional Text Generation", + "abstract": "Large Language Models (LLMs) can generate biased and toxic responses. Yet\nmost prior work on LLM gender bias evaluation requires predefined\ngender-related phrases or gender stereotypes, which are challenging to be\ncomprehensively collected and are limited to explicit bias evaluation. In\naddition, we believe that instances devoid of gender-related language or\nexplicit stereotypes in inputs can still induce gender bias in LLMs. Thus, in\nthis work, we propose a conditional text generation mechanism without the need\nfor predefined gender phrases and stereotypes. This approach employs three\ntypes of inputs generated through three distinct strategies to probe LLMs,\naiming to show evidence of explicit and implicit gender biases in LLMs. We also\nutilize explicit and implicit evaluation metrics to evaluate gender bias in\nLLMs under different strategies. Our experiments demonstrate that an increased\nmodel size does not consistently lead to enhanced fairness and all tested LLMs\nexhibit explicit and/or implicit gender bias, even when explicit gender\nstereotypes are absent in the inputs.", + "authors": "Xiangjue Dong, Yibo Wang, Philip S. 
Yu, James Caverlee", + "published": "2023-11-01", + "updated": "2023-11-01", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2311.18140v1", + "title": "ROBBIE: Robust Bias Evaluation of Large Generative Language Models", + "abstract": "As generative large language models (LLMs) grow more performant and\nprevalent, we must develop comprehensive enough tools to measure and improve\ntheir fairness. Different prompt-based datasets can be used to measure social\nbias across multiple text domains and demographic axes, meaning that testing\nLLMs on more datasets can potentially help us characterize their biases more\nfully, and better ensure equal and equitable treatment of marginalized\ndemographic groups. In this work, our focus is two-fold:\n (1) Benchmarking: a comparison of 6 different prompt-based bias and toxicity\nmetrics across 12 demographic axes and 5 families of generative LLMs. Out of\nthose 6 metrics, AdvPromptSet and HolisticBiasR are novel datasets proposed in\nthe paper. The comparison of those benchmarks gives us insights about the bias\nand toxicity of the compared models. Therefore, we explore the frequency of\ndemographic terms in common LLM pre-training corpora and how this may relate to\nmodel biases.\n (2) Mitigation: we conduct a comprehensive study of how well 3 bias/toxicity\nmitigation techniques perform across our suite of measurements. ROBBIE aims to\nprovide insights for practitioners while deploying a model, emphasizing the\nneed to not only measure potential harms, but also understand how they arise by\ncharacterizing the data, mitigate harms once found, and balance any trade-offs.\nWe open-source our analysis code in hopes of encouraging broader measurements\nof bias in future LLMs.", + "authors": "David Esiobu, Xiaoqing Tan, Saghar Hosseini, Megan Ung, Yuchen Zhang, Jude Fernandes, Jane Dwivedi-Yu, Eleonora Presani, Adina Williams, Eric Michael Smith", + "published": "2023-11-29", + "updated": "2023-11-29", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2403.09606v1", + "title": "Large Language Models and Causal Inference in Collaboration: A Comprehensive Survey", + "abstract": "Causal inference has shown potential in enhancing the predictive accuracy,\nfairness, robustness, and explainability of Natural Language Processing (NLP)\nmodels by capturing causal relationships among variables. The emergence of\ngenerative Large Language Models (LLMs) has significantly impacted various NLP\ndomains, particularly through their advanced reasoning capabilities. This\nsurvey focuses on evaluating and improving LLMs from a causal view in the\nfollowing areas: understanding and improving the LLMs' reasoning capacity,\naddressing fairness and safety issues in LLMs, complementing LLMs with\nexplanations, and handling multimodality. Meanwhile, LLMs' strong reasoning\ncapacities can in turn contribute to the field of causal inference by aiding\ncausal relationship discovery and causal effect estimations. 
This review\nexplores the interplay between causal inference frameworks and LLMs from both\nperspectives, emphasizing their collective potential to further the development\nof more advanced and equitable artificial intelligence systems.", + "authors": "Xiaoyu Liu, Paiheng Xu, Junda Wu, Jiaxin Yuan, Yifan Yang, Yuhang Zhou, Fuxiao Liu, Tianrui Guan, Haoliang Wang, Tong Yu, Julian McAuley, Wei Ai, Furong Huang", + "published": "2024-03-14", + "updated": "2024-03-14", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2404.01349v1", + "title": "Fairness in Large Language Models: A Taxonomic Survey", + "abstract": "Large Language Models (LLMs) have demonstrated remarkable success across\nvarious domains. However, despite their promising performance in numerous\nreal-world applications, most of these algorithms lack fairness considerations.\nConsequently, they may lead to discriminatory outcomes against certain\ncommunities, particularly marginalized populations, prompting extensive study\nin fair LLMs. On the other hand, fairness in LLMs, in contrast to fairness in\ntraditional machine learning, entails exclusive backgrounds, taxonomies, and\nfulfillment techniques. To this end, this survey presents a comprehensive\noverview of recent advances in the existing literature concerning fair LLMs.\nSpecifically, a brief introduction to LLMs is provided, followed by an analysis\nof factors contributing to bias in LLMs. Additionally, the concept of fairness\nin LLMs is discussed categorically, summarizing metrics for evaluating bias in\nLLMs and existing algorithms for promoting fairness. Furthermore, resources for\nevaluating bias in LLMs, including toolkits and datasets, are summarized.\nFinally, existing research challenges and open questions are discussed.", + "authors": "Zhibo Chu, Zichong Wang, Wenbin Zhang", + "published": "2024-03-31", + "updated": "2024-03-31", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2310.05694v1", + "title": "A Survey of Large Language Models for Healthcare: from Data, Technology, and Applications to Accountability and Ethics", + "abstract": "The utilization of large language models (LLMs) in the Healthcare domain has\ngenerated both excitement and concern due to their ability to effectively\nrespond to freetext queries with certain professional knowledge. This survey\noutlines the capabilities of the currently developed LLMs for Healthcare and\nexplicates their development process, with the aim of providing an overview of\nthe development roadmap from traditional Pretrained Language Models (PLMs) to\nLLMs. Specifically, we first explore the potential of LLMs to enhance the\nefficiency and effectiveness of various Healthcare applications highlighting\nboth the strengths and limitations. Secondly, we conduct a comparison between\nthe previous PLMs and the latest LLMs, as well as comparing various LLMs with\neach other. Then we summarize related Healthcare training data, training\nmethods, optimization strategies, and usage. Finally, the unique concerns\nassociated with deploying LLMs in Healthcare settings are investigated,\nparticularly regarding fairness, accountability, transparency and ethics. Our\nsurvey provide a comprehensive investigation from perspectives of both computer\nscience and Healthcare specialty. 
Besides the discussion about Healthcare\nconcerns, we supports the computer science community by compiling a collection\nof open source resources, such as accessible datasets, the latest\nmethodologies, code implementations, and evaluation benchmarks in the Github.\nSummarily, we contend that a significant paradigm shift is underway,\ntransitioning from PLMs to LLMs. This shift encompasses a move from\ndiscriminative AI approaches to generative AI approaches, as well as a shift\nfrom model-centered methodologies to datacentered methodologies.", + "authors": "Kai He, Rui Mao, Qika Lin, Yucheng Ruan, Xiang Lan, Mengling Feng, Erik Cambria", + "published": "2023-10-09", + "updated": "2023-10-09", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2309.14345v2", + "title": "Bias Testing and Mitigation in LLM-based Code Generation", + "abstract": "Utilizing state-of-the-art Large Language Models (LLMs), automatic code\ngeneration models play a pivotal role in enhancing the productivity of software\ndevelopment procedures. As the adoption of LLMs becomes more widespread in\nsoftware coding ecosystems, a pressing issue has emerged: does the generated\ncode contain social bias and unfairness, such as those related to age, gender,\nand race? This issue concerns the integrity, fairness, and ethical foundation\nof software applications that depend on the code generated by these models, yet\nis under-explored in the literature. This paper presents a novel bias testing\nframework that is specifically designed for code generation tasks. Based on\nthis framework, we conduct an extensive evaluation of the bias in code\ngenerated by five state-of-the-art LLMs. Our findings reveal that 20.29% to\n44.93% code functions generated by the models under study are biased when\nhandling bias sensitive tasks (i.e., tasks that involve sensitive attributes\nsuch as age and gender). This indicates that the existing LLMs can be unfair in\ncode generation, posing risks of unintended and harmful software behaviors. To\nmitigate bias for code generation models, we evaluate five bias mitigation\nprompt strategies, i.e., utilizing bias testing results to refine the code\n(zero-shot), one-, few-shot, and two Chain-of-Thought (CoT) prompts. Our\nevaluation results illustrate that these strategies are all effective in\nmitigating bias. Overall, one-shot and few-shot learning are the two most\neffective. For GPT-4, 80% to 90% code bias can be removed with one-shot\nlearning.", + "authors": "Dong Huang, Qingwen Bu, Jie Zhang, Xiaofei Xie, Junjie Chen, Heming Cui", + "published": "2023-09-03", + "updated": "2024-01-09", + "primary_cat": "cs.SE", + "cats": [ + "cs.SE", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2310.09219v5", + "title": "\"Kelly is a Warm Person, Joseph is a Role Model\": Gender Biases in LLM-Generated Reference Letters", + "abstract": "Large Language Models (LLMs) have recently emerged as an effective tool to\nassist individuals in writing various types of content, including professional\ndocuments such as recommendation letters. Though bringing convenience, this\napplication also introduces unprecedented fairness concerns. Model-generated\nreference letters might be directly used by users in professional scenarios. 
If\nunderlying biases exist in these model-constructed letters, using them without\nscrutinization could lead to direct societal harms, such as sabotaging\napplication success rates for female applicants. In light of this pressing\nissue, it is imminent and necessary to comprehensively study fairness issues\nand associated harms in this real-world use case. In this paper, we critically\nexamine gender biases in LLM-generated reference letters. Drawing inspiration\nfrom social science findings, we design evaluation methods to manifest biases\nthrough 2 dimensions: (1) biases in language style and (2) biases in lexical\ncontent. We further investigate the extent of bias propagation by analyzing the\nhallucination bias of models, a term that we define to be bias exacerbation in\nmodel-hallucinated contents. Through benchmarking evaluation on 2 popular LLMs-\nChatGPT and Alpaca, we reveal significant gender biases in LLM-generated\nrecommendation letters. Our findings not only warn against using LLMs for this\napplication without scrutinization, but also illuminate the importance of\nthoroughly studying hidden biases and harms in LLM-generated professional\ndocuments.", + "authors": "Yixin Wan, George Pu, Jiao Sun, Aparna Garimella, Kai-Wei Chang, Nanyun Peng", + "published": "2023-10-13", + "updated": "2023-12-01", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2311.02049v1", + "title": "Post Turing: Mapping the landscape of LLM Evaluation", + "abstract": "In the rapidly evolving landscape of Large Language Models (LLMs),\nintroduction of well-defined and standardized evaluation methodologies remains\na crucial challenge. This paper traces the historical trajectory of LLM\nevaluations, from the foundational questions posed by Alan Turing to the modern\nera of AI research. We categorize the evolution of LLMs into distinct periods,\neach characterized by its unique benchmarks and evaluation criteria. As LLMs\nincreasingly mimic human-like behaviors, traditional evaluation proxies, such\nas the Turing test, have become less reliable. We emphasize the pressing need\nfor a unified evaluation system, given the broader societal implications of\nthese models. Through an analysis of common evaluation methodologies, we\nadvocate for a qualitative shift in assessment approaches, underscoring the\nimportance of standardization and objective criteria. This work serves as a\ncall for the AI community to collaboratively address the challenges of LLM\nevaluation, ensuring their reliability, fairness, and societal benefit.", + "authors": "Alexey Tikhonov, Ivan P. Yamshchikov", + "published": "2023-11-03", + "updated": "2023-11-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "68T50", + "I.2.7" + ], + "category": "LLM Fairness" + }, + { + "url": "http://arxiv.org/abs/2402.10567v3", + "title": "InSaAF: Incorporating Safety through Accuracy and Fairness | Are LLMs ready for the Indian Legal Domain?", + "abstract": "Recent advancements in language technology and Artificial Intelligence have\nresulted in numerous Language Models being proposed to perform various tasks in\nthe legal domain ranging from predicting judgments to generating summaries.\nDespite their immense potential, these models have been proven to learn and\nexhibit societal biases and make unfair predictions. 
In this study, we explore\nthe ability of Large Language Models (LLMs) to perform legal tasks in the\nIndian landscape when social factors are involved. We present a novel metric,\n$\\beta$-weighted $\\textit{Legal Safety Score ($LSS_{\\beta}$)}$, which\nencapsulates both the fairness and accuracy aspects of the LLM. We assess LLMs'\nsafety by considering its performance in the $\\textit{Binary Statutory\nReasoning}$ task and its fairness exhibition with respect to various axes of\ndisparities in the Indian society. Task performance and fairness scores of\nLLaMA and LLaMA--2 models indicate that the proposed $LSS_{\\beta}$ metric can\neffectively determine the readiness of a model for safe usage in the legal\nsector. We also propose finetuning pipelines, utilising specialised legal\ndatasets, as a potential method to mitigate bias and improve model safety. The\nfinetuning procedures on LLaMA and LLaMA--2 models increase the $LSS_{\\beta}$,\nimproving their usability in the Indian legal domain. Our code is publicly\nreleased.", + "authors": "Yogesh Tripathi, Raghav Donakanti, Sahil Girhepuje, Ishan Kavathekar, Bhaskara Hanuma Vedula, Gokul S Krishnan, Shreya Goyal, Anmol Goel, Balaraman Ravindran, Ponnurangam Kumaraguru", + "published": "2024-02-16", + "updated": "2024-02-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "LLM Fairness" + } +] \ No newline at end of file